From 3c7e1155265b1742e9da67b9b3b97b518d1c9f4b Mon Sep 17 00:00:00 2001
From: Farshid Tavakolizadeh
Date: Wed, 26 Feb 2020 12:09:33 +0100
Subject: [PATCH] switched to go modules for dependency management

---
 .gitignore | 2 +
 Gopkg.lock | 93 -
 Gopkg.toml | 62 -
 catalog/http.go | 5 +-
 catalog/remoteclient.go | 2 +-
 catalog/td_schema.go | 2 +-
 catalog/utils.go | 4 +-
 catalog/wot_thing_description.go | 5 +-
 config.go | 4 +-
 go.mod | 23 +
 go.sum | 89 +
 main.go | 12 +-
 registration.go | 4 +-
 .../code.linksmart.eu/com/go-sec/.gitignore | 3 -
 .../code.linksmart.eu/com/go-sec/Gopkg.lock | 15 -
 .../code.linksmart.eu/com/go-sec/Gopkg.toml | 26 -
 vendor/code.linksmart.eu/com/go-sec/README.md | 6 -
 .../com/go-sec/auth/README.md | 6 -
 .../com/go-sec/authz/README.md | 4 -
 .../sc/service-catalog/.gitignore | 3 -
 .../sc/service-catalog/Dockerfile | 17 -
 .../sc/service-catalog/Gopkg.lock | 87 -
 .../sc/service-catalog/Gopkg.toml | 58 -
 .../sc/service-catalog/README.md | 17 -
 .../sc/service-catalog/apidoc/swagger.json | 493 -
 .../sc/service-catalog/config.go | 147 -
 .../sc/service-catalog/discovery/init.go | 20 -
 .../sc/service-catalog/init.go | 20 -
 .../sc/service-catalog/main.go | 185 -
 .../sc/service-catalog/router.go | 53 -
 .../service-catalog/sample_conf/docker.json | 12 -
 .../sample_conf/service-catalog-auth.json | 37 -
 .../sample_conf/service-catalog.json | 14 -
 .../sc/service-catalog/service/catalog.go | 109 -
 .../sc/service-catalog/service/catalogapi.go | 289 -
 .../service/catalogapi_test.go | 485 -
 .../sc/service-catalog/service/client.go | 36 -
 .../sc/service-catalog/service/constants.go | 20 -
 .../sc/service-catalog/service/controller.go | 341 -
 .../service/controller_test.go | 315 -
 .../sc/service-catalog/service/doc.go | 5 -
 .../sc/service-catalog/service/errors.go | 48 -
 .../sc/service-catalog/service/gcpublisher.go | 255 -
 .../sc/service-catalog/service/init.go | 20 -
 .../sc/service-catalog/service/main_test.go | 33 -
 .../service-catalog/service/remoteclient.go | 327 -
 .../service/service-registrar.go | 185 -
 .../sc/service-catalog/utils/init.go | 20 -
 .../ancientlore/go-avltree/.travis.yml | 2 +-
 .../github.com/ancientlore/go-avltree/go.mod | 1 +
 .../ancientlore/go-avltree/objecttree.go | 10 +-
 .../ancientlore/go-avltree/objecttree_test.go | 247 -
 .../ancientlore/go-avltree/pairtree.go | 20 +-
 .../ancientlore/go-avltree/pairtree_test.go | 232 -
 .../ancientlore/go-avltree/stringtree.go | 15 +-
 .../ancientlore/go-avltree/stringtree_test.go | 237 -
 .../github.com/ancientlore/go-avltree/tree.go | 33 +-
 .../ancientlore/go-avltree/tree_test.go | 246 -
 .../ancientlore/go-avltree/treeprint.go | 1 +
 .../ancientlore/go-avltree/treeremove.go | 2 +-
 .../codegangsta/negroni/logger_test.go | 33 -
 .../codegangsta/negroni/negroni_test.go | 87 -
 .../codegangsta/negroni/recovery_test.go | 28 -
 .../negroni/response_writer_test.go | 150 -
 .../codegangsta/negroni/static_test.go | 113 -
 .../negroni/translations/README_de_de.md | 177 -
 .../negroni/translations/README_pt_br.md | 170 -
 .../negroni/translations/README_zh_cn.md | 182 -
 .../negroni/translations/README_zh_tw.md | 177 -
 .../dgrijalva/jwt-go/cmd/jwt/README.md | 13 -
 .../dgrijalva/jwt-go/cmd/jwt/app.go | 245 -
 .../github.com/dgrijalva/jwt-go/ecdsa_test.go | 100 -
 .../dgrijalva/jwt-go/example_test.go | 114 -
 .../dgrijalva/jwt-go/hmac_example_test.go | 64 -
 .../github.com/dgrijalva/jwt-go/hmac_test.go | 91 -
 .../dgrijalva/jwt-go/http_example_test.go | 216 -
 .../github.com/dgrijalva/jwt-go/none_test.go | 72 -
 .../dgrijalva/jwt-go/parser_test.go | 252 -
.../dgrijalva/jwt-go/request/doc.go | 7 - .../dgrijalva/jwt-go/request/extractor.go | 81 - .../jwt-go/request/extractor_example_test.go | 32 - .../jwt-go/request/extractor_test.go | 91 - .../dgrijalva/jwt-go/request/oauth2.go | 28 - .../dgrijalva/jwt-go/request/request.go | 24 - .../dgrijalva/jwt-go/request/request_test.go | 103 - .../dgrijalva/jwt-go/rsa_pss_test.go | 96 - .../github.com/dgrijalva/jwt-go/rsa_test.go | 176 - .../dgrijalva/jwt-go/test/ec256-private.pem | 5 - .../dgrijalva/jwt-go/test/ec256-public.pem | 4 - .../dgrijalva/jwt-go/test/ec384-private.pem | 6 - .../dgrijalva/jwt-go/test/ec384-public.pem | 5 - .../dgrijalva/jwt-go/test/ec512-private.pem | 7 - .../dgrijalva/jwt-go/test/ec512-public.pem | 6 - .../dgrijalva/jwt-go/test/helpers.go | 42 - .../dgrijalva/jwt-go/test/hmacTestKey | 1 - .../dgrijalva/jwt-go/test/sample_key | 27 - .../dgrijalva/jwt-go/test/sample_key.pub | 9 - .../eclipse/paho.mqtt.golang/.gitignore | 36 + .../eclipse/paho.mqtt.golang/CONTRIBUTING.md | 56 + .../eclipse/paho.mqtt.golang/DISTRIBUTION | 15 + .../eclipse/paho.mqtt.golang/LICENSE | 87 + .../eclipse/paho.mqtt.golang/README.md | 67 + .../eclipse/paho.mqtt.golang/about.html | 41 + .../eclipse/paho.mqtt.golang/client.go | 759 + .../eclipse/paho.mqtt.golang/components.go | 31 + .../eclipse/paho.mqtt.golang/edl-v10 | 15 + .../eclipse/paho.mqtt.golang/epl-v10 | 70 + .../eclipse/paho.mqtt.golang/filestore.go | 255 + .../eclipse/paho.mqtt.golang/memstore.go | 138 + .../eclipse/paho.mqtt.golang/message.go | 127 + .../eclipse/paho.mqtt.golang/messageids.go | 117 + .../eclipse/paho.mqtt.golang/net.go | 355 + .../eclipse/paho.mqtt.golang/notice.html | 108 + .../eclipse/paho.mqtt.golang/oops.go | 21 + .../eclipse/paho.mqtt.golang/options.go | 340 + .../paho.mqtt.golang/options_reader.go | 149 + .../paho.mqtt.golang/packets/connack.go | 55 + .../paho.mqtt.golang/packets/connect.go | 154 + .../paho.mqtt.golang/packets/disconnect.go | 36 + .../paho.mqtt.golang/packets/packets.go | 346 + .../paho.mqtt.golang/packets/pingreq.go | 36 + .../paho.mqtt.golang/packets/pingresp.go | 36 + .../paho.mqtt.golang/packets/puback.go | 45 + .../paho.mqtt.golang/packets/pubcomp.go | 45 + .../paho.mqtt.golang/packets/publish.go | 88 + .../paho.mqtt.golang/packets/pubrec.go | 45 + .../paho.mqtt.golang/packets/pubrel.go | 45 + .../paho.mqtt.golang/packets/suback.go | 60 + .../paho.mqtt.golang/packets/subscribe.go | 72 + .../paho.mqtt.golang/packets/unsuback.go | 45 + .../paho.mqtt.golang/packets/unsubscribe.go | 59 + .../eclipse/paho.mqtt.golang/ping.go | 69 + .../eclipse/paho.mqtt.golang/router.go | 187 + .../eclipse/paho.mqtt.golang/store.go | 136 + .../eclipse/paho.mqtt.golang/token.go | 184 + .../eclipse/paho.mqtt.golang/topic.go | 82 + .../eclipse/paho.mqtt.golang/trace.go | 40 + vendor/github.com/farshidtz/elog/.travis.yml | 4 + vendor/github.com/farshidtz/elog/LICENSE | 21 + vendor/github.com/farshidtz/elog/README.md | 156 + vendor/github.com/farshidtz/elog/config.go | 99 + vendor/github.com/farshidtz/elog/doc.go | 30 + vendor/github.com/farshidtz/elog/go.mod | 3 + vendor/github.com/farshidtz/elog/logger.go | 97 + vendor/github.com/farshidtz/elog/writer.go | 33 + .../farshidtz/mqtt-match/.travis.yml | 4 + .../github.com/farshidtz/mqtt-match/LICENSE | 21 + .../github.com/farshidtz/mqtt-match/README.md | 21 + .../github.com/farshidtz/mqtt-match/match.go | 22 + .../golang/snappy/cmd/snappytool/main.cpp | 77 - .../github.com/golang/snappy/golden_test.go | 1965 -- vendor/github.com/golang/snappy/snappy.go | 17 +- 
.../github.com/golang/snappy/snappy_test.go | 1353 -- .../snappy/testdata/Mark.Twain-Tom.Sawyer.txt | 396 - .../Mark.Twain-Tom.Sawyer.txt.rawsnappy | Bin 9871 -> 0 bytes vendor/github.com/gorilla/context/.travis.yml | 8 +- vendor/github.com/gorilla/context/README.md | 3 + .../gorilla/context/context_test.go | 161 - vendor/github.com/gorilla/context/doc.go | 6 + vendor/github.com/gorilla/mux/bench_test.go | 49 - .../gorilla/mux/context_gorilla_test.go | 40 - .../gorilla/mux/context_native_test.go | 32 - vendor/github.com/gorilla/mux/mux_test.go | 1912 -- vendor/github.com/gorilla/mux/old_test.go | 704 - .../github.com/justinas/alice/chain_test.go | 154 - vendor/github.com/linksmart/go-sec/LICENSE | 202 + vendor/github.com/linksmart/go-sec/NOTICE | 10 + .../go-sec/auth/keycloak/obtainer/obtainer.go | 3 +- .../auth/keycloak/validator/validator.go | 11 +- .../linksmart}/go-sec/auth/obtainer/client.go | 0 .../go-sec/auth/obtainer/obtainer.go | 0 .../go-sec/auth/validator/handler.go | 4 +- .../go-sec/auth/validator/validator.go | 2 +- .../linksmart}/go-sec/authz/authz.go | 0 .../linksmart}/go-sec/authz/config.go | 0 .../linksmart/service-catalog}/LICENSE | 0 .../linksmart/service-catalog}/NOTICE | 0 .../service-catalog/discovery/README.md | 0 .../service-catalog/discovery/catalog.go | 0 .../service-catalog/discovery/init.go | 19 + .../service-catalog/discovery/utils.go | 3 - .../linksmart/service-catalog/v2}/LICENSE | 0 .../linksmart/service-catalog/v2}/NOTICE | 0 .../service-catalog/v2/catalog/catalog.go | 121 + .../service-catalog/v2/catalog/constants.go | 22 + .../service-catalog/v2/catalog/controller.go | 220 + .../service-catalog/v2/catalog/errors.go | 18 + .../service-catalog/v2/catalog/http.go | 244 + .../service-catalog/v2/catalog/init.go | 30 + .../service-catalog/v2/catalog}/ldbstorage.go | 47 +- .../service-catalog/v2/catalog}/memstorage.go | 32 +- .../service-catalog/v2/catalog/mqtt.go | 264 + .../service-catalog/v2/catalog/mqtt_conf.go | 103 + .../service-catalog/v2}/utils/http.go | 2 +- .../service-catalog/v2/utils/init.go | 17 + .../service-catalog/v2}/utils/pagination.go | 0 .../service-catalog/v2}/utils/pathfilter.go | 0 vendor/github.com/miekg/dns/client_test.go | 532 - .../github.com/miekg/dns/clientconfig_test.go | 87 - .../github.com/miekg/dns/compress_generate.go | 184 - vendor/github.com/miekg/dns/dns_bench_test.go | 211 - vendor/github.com/miekg/dns/dns_test.go | 453 - vendor/github.com/miekg/dns/dnssec_test.go | 733 - vendor/github.com/miekg/dns/dnsutil/util.go | 79 - .../github.com/miekg/dns/dnsutil/util_test.go | 130 - vendor/github.com/miekg/dns/dyn_test.go | 3 - vendor/github.com/miekg/dns/edns_test.go | 68 - vendor/github.com/miekg/dns/example_test.go | 146 - vendor/github.com/miekg/dns/fuzz_test.go | 25 - .../github.com/miekg/dns/idn/code_points.go | 2346 --- .../github.com/miekg/dns/idn/example_test.go | 18 - vendor/github.com/miekg/dns/idn/punycode.go | 370 - .../github.com/miekg/dns/idn/punycode_test.go | 116 - vendor/github.com/miekg/dns/issue_test.go | 68 - vendor/github.com/miekg/dns/labels_test.go | 203 - vendor/github.com/miekg/dns/msg_generate.go | 349 - vendor/github.com/miekg/dns/msg_test.go | 124 - vendor/github.com/miekg/dns/nsecx_test.go | 133 - vendor/github.com/miekg/dns/parse_test.go | 1540 -- vendor/github.com/miekg/dns/privaterr_test.go | 171 - vendor/github.com/miekg/dns/remote_test.go | 19 - vendor/github.com/miekg/dns/sanitize_test.go | 84 - vendor/github.com/miekg/dns/scan_test.go | 48 - vendor/github.com/miekg/dns/server_test.go | 719 - 
vendor/github.com/miekg/dns/sig0_test.go | 89 - vendor/github.com/miekg/dns/tsig_test.go | 52 - vendor/github.com/miekg/dns/types_generate.go | 271 - vendor/github.com/miekg/dns/types_test.go | 74 - vendor/github.com/miekg/dns/update_test.go | 145 - vendor/github.com/miekg/dns/xfr_test.go | 184 - .../github.com/pborman/uuid/marshal_test.go | 124 - vendor/github.com/pborman/uuid/seq_test.go | 66 - vendor/github.com/pborman/uuid/sql_test.go | 96 - vendor/github.com/pborman/uuid/uuid_test.go | 543 - vendor/github.com/satori/go.uuid/.travis.yml | 23 + vendor/github.com/satori/go.uuid/LICENSE | 20 + vendor/github.com/satori/go.uuid/README.md | 65 + vendor/github.com/satori/go.uuid/codec.go | 206 + vendor/github.com/satori/go.uuid/generator.go | 239 + vendor/github.com/satori/go.uuid/sql.go | 78 + vendor/github.com/satori/go.uuid/uuid.go | 161 + .../github.com/syndtr/goleveldb/.travis.yml | 12 - vendor/github.com/syndtr/goleveldb/README.md | 107 - .../syndtr/goleveldb/leveldb/batch_test.go | 147 - .../syndtr/goleveldb/leveldb/bench_test.go | 507 - .../goleveldb/leveldb/cache/bench_test.go | 29 - .../syndtr/goleveldb/leveldb/cache/cache.go | 1 - .../goleveldb/leveldb/cache/cache_test.go | 563 - .../leveldb/comparer/bytes_comparer.go | 4 +- .../goleveldb/leveldb/comparer/comparer.go | 2 +- .../syndtr/goleveldb/leveldb/corrupt_test.go | 496 - .../github.com/syndtr/goleveldb/leveldb/db.go | 100 +- .../syndtr/goleveldb/leveldb/db_compaction.go | 48 +- .../syndtr/goleveldb/leveldb/db_snapshot.go | 4 + .../syndtr/goleveldb/leveldb/db_test.go | 2925 --- .../goleveldb/leveldb/db_transaction.go | 4 + .../syndtr/goleveldb/leveldb/db_util.go | 2 +- .../syndtr/goleveldb/leveldb/db_write.go | 14 +- .../syndtr/goleveldb/leveldb/external_test.go | 117 - .../goleveldb/leveldb/filter/bloom_test.go | 142 - .../leveldb/iterator/array_iter_test.go | 30 - .../leveldb/iterator/indexed_iter_test.go | 83 - .../syndtr/goleveldb/leveldb/iterator/iter.go | 6 +- .../leveldb/iterator/iter_suite_test.go | 11 - .../leveldb/iterator/merged_iter_test.go | 60 - .../goleveldb/leveldb/journal/journal_test.go | 818 - .../syndtr/goleveldb/leveldb/key_test.go | 133 - .../goleveldb/leveldb/leveldb_suite_test.go | 11 - .../goleveldb/leveldb/memdb/bench_test.go | 75 - .../syndtr/goleveldb/leveldb/memdb/memdb.go | 6 +- .../leveldb/memdb/memdb_suite_test.go | 11 - .../goleveldb/leveldb/memdb/memdb_test.go | 135 - .../syndtr/goleveldb/leveldb/opt/options.go | 13 + .../syndtr/goleveldb/leveldb/session.go | 4 +- .../goleveldb/leveldb/session_record_test.go | 62 - .../syndtr/goleveldb/leveldb/session_util.go | 4 +- .../syndtr/goleveldb/leveldb/storage.go | 63 + .../goleveldb/leveldb/storage/file_storage.go | 300 +- .../leveldb/storage/file_storage_plan9.go | 4 +- .../leveldb/storage/file_storage_test.go | 176 - .../leveldb/storage/file_storage_unix.go | 12 + .../goleveldb/leveldb/storage/mem_storage.go | 8 +- .../leveldb/storage/mem_storage_test.go | 65 - .../goleveldb/leveldb/storage/storage.go | 8 + .../syndtr/goleveldb/leveldb/table.go | 30 +- .../goleveldb/leveldb/table/block_test.go | 139 - .../syndtr/goleveldb/leveldb/table/reader.go | 4 + .../leveldb/table/table_suite_test.go | 11 - .../goleveldb/leveldb/table/table_test.go | 123 - .../syndtr/goleveldb/leveldb/testutil/db.go | 222 - .../goleveldb/leveldb/testutil/ginkgo.go | 21 - .../syndtr/goleveldb/leveldb/testutil/iter.go | 327 - .../syndtr/goleveldb/leveldb/testutil/kv.go | 352 - .../goleveldb/leveldb/testutil/kvtest.go | 212 - .../goleveldb/leveldb/testutil/storage.go | 693 - 
.../syndtr/goleveldb/leveldb/testutil/util.go | 171 - .../syndtr/goleveldb/leveldb/testutil_test.go | 91 - .../syndtr/goleveldb/leveldb/util.go | 2 +- .../goleveldb/leveldb/util/buffer_test.go | 369 - .../goleveldb/leveldb/util/hash_test.go | 46 - .../syndtr/goleveldb/leveldb/util/util.go | 2 +- .../syndtr/goleveldb/leveldb/version_test.go | 181 - .../goleveldb/manualtest/dbstress/key.go | 137 - .../goleveldb/manualtest/dbstress/main.go | 628 - .../goleveldb/manualtest/filelock/main.go | 85 - .../gojsonpointer/LICENSE-APACHE-2.0.txt | 202 + .../xeipuuv/gojsonpointer/README.md | 41 + .../xeipuuv/gojsonpointer/pointer.go | 211 + .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 + .../xeipuuv/gojsonreference/README.md | 10 + .../xeipuuv/gojsonreference/reference.go | 147 + .../xeipuuv/gojsonschema/.gitignore | 3 + .../xeipuuv/gojsonschema/.travis.yml | 9 + .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 + .../github.com/xeipuuv/gojsonschema/README.md | 466 + .../github.com/xeipuuv/gojsonschema/draft.go | 125 + .../github.com/xeipuuv/gojsonschema/errors.go | 364 + .../xeipuuv/gojsonschema/format_checkers.go | 368 + .../xeipuuv/gojsonschema/glide.yaml | 13 + vendor/github.com/xeipuuv/gojsonschema/go.mod | 7 + vendor/github.com/xeipuuv/gojsonschema/go.sum | 11 + .../xeipuuv/gojsonschema/internalLog.go | 37 + .../xeipuuv/gojsonschema/jsonContext.go | 73 + .../xeipuuv/gojsonschema/jsonLoader.go | 386 + .../xeipuuv/gojsonschema/locales.go | 472 + .../github.com/xeipuuv/gojsonschema/result.go | 220 + .../github.com/xeipuuv/gojsonschema/schema.go | 1087 + .../xeipuuv/gojsonschema/schemaLoader.go | 206 + .../xeipuuv/gojsonschema/schemaPool.go | 215 + .../gojsonschema/schemaReferencePool.go | 68 + .../xeipuuv/gojsonschema/schemaType.go | 83 + .../xeipuuv/gojsonschema/subSchema.go | 149 + .../github.com/xeipuuv/gojsonschema/types.go | 62 + .../github.com/xeipuuv/gojsonschema/utils.go | 197 + .../xeipuuv/gojsonschema/validation.go | 858 + vendor/golang.org/x/net/.gitattributes | 10 - vendor/golang.org/x/net/.gitignore | 2 - vendor/golang.org/x/net/CONTRIBUTING.md | 31 - vendor/golang.org/x/net/README | 3 - vendor/golang.org/x/net/bpf/instructions.go | 32 +- .../golang.org/x/net/bpf/instructions_test.go | 525 - .../x/net/bpf/testdata/all_instructions.bpf | 1 - .../x/net/bpf/testdata/all_instructions.txt | 79 - vendor/golang.org/x/net/bpf/vm_aluop_test.go | 512 - vendor/golang.org/x/net/bpf/vm_bpf_test.go | 192 - .../golang.org/x/net/bpf/vm_extension_test.go | 49 - vendor/golang.org/x/net/bpf/vm_jump_test.go | 380 - vendor/golang.org/x/net/bpf/vm_load_test.go | 246 - vendor/golang.org/x/net/bpf/vm_ret_test.go | 115 - .../golang.org/x/net/bpf/vm_scratch_test.go | 247 - vendor/golang.org/x/net/bpf/vm_test.go | 144 - vendor/golang.org/x/net/codereview.cfg | 1 - vendor/golang.org/x/net/context/context.go | 54 - .../golang.org/x/net/context/context_test.go | 583 - .../x/net/context/ctxhttp/ctxhttp.go | 74 - .../x/net/context/ctxhttp/ctxhttp_17_test.go | 29 - .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 - .../net/context/ctxhttp/ctxhttp_pre17_test.go | 79 - .../x/net/context/ctxhttp/ctxhttp_test.go | 105 - vendor/golang.org/x/net/context/go17.go | 72 - vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 - vendor/golang.org/x/net/context/pre_go19.go | 109 - .../x/net/context/withtimeout_test.go | 31 - vendor/golang.org/x/net/dict/dict.go | 210 - .../x/net/dns/dnsmessage/example_test.go | 132 - .../x/net/dns/dnsmessage/message.go | 1997 -- 
.../x/net/dns/dnsmessage/message_test.go | 1116 - vendor/golang.org/x/net/html/atom/atom.go | 78 - .../golang.org/x/net/html/atom/atom_test.go | 109 - vendor/golang.org/x/net/html/atom/gen.go | 648 - vendor/golang.org/x/net/html/atom/table.go | 713 - .../golang.org/x/net/html/atom/table_test.go | 351 - .../golang.org/x/net/html/charset/charset.go | 257 - .../x/net/html/charset/charset_test.go | 237 - .../html/charset/testdata/HTTP-charset.html | 48 - .../charset/testdata/HTTP-vs-UTF-8-BOM.html | 48 - .../testdata/HTTP-vs-meta-charset.html | 49 - .../testdata/HTTP-vs-meta-content.html | 49 - .../testdata/No-encoding-declaration.html | 47 - .../x/net/html/charset/testdata/README | 9 - .../html/charset/testdata/UTF-16BE-BOM.html | Bin 2670 -> 0 bytes .../html/charset/testdata/UTF-16LE-BOM.html | Bin 2682 -> 0 bytes .../testdata/UTF-8-BOM-vs-meta-charset.html | 49 - .../testdata/UTF-8-BOM-vs-meta-content.html | 48 - .../testdata/meta-charset-attribute.html | 48 - .../testdata/meta-content-attribute.html | 48 - vendor/golang.org/x/net/html/const.go | 102 - vendor/golang.org/x/net/html/doc.go | 106 - vendor/golang.org/x/net/html/doctype.go | 156 - vendor/golang.org/x/net/html/entity.go | 2253 --- vendor/golang.org/x/net/html/entity_test.go | 29 - vendor/golang.org/x/net/html/escape.go | 258 - vendor/golang.org/x/net/html/escape_test.go | 97 - vendor/golang.org/x/net/html/example_test.go | 40 - vendor/golang.org/x/net/html/foreign.go | 226 - vendor/golang.org/x/net/html/node.go | 193 - vendor/golang.org/x/net/html/node_test.go | 146 - vendor/golang.org/x/net/html/parse.go | 2094 -- vendor/golang.org/x/net/html/parse_test.go | 388 - vendor/golang.org/x/net/html/render.go | 271 - vendor/golang.org/x/net/html/render_test.go | 156 - .../golang.org/x/net/html/testdata/go1.html | 2237 --- .../x/net/html/testdata/webkit/README | 28 - .../x/net/html/testdata/webkit/adoption01.dat | 194 - .../x/net/html/testdata/webkit/adoption02.dat | 31 - .../x/net/html/testdata/webkit/comments01.dat | 135 - .../x/net/html/testdata/webkit/doctype01.dat | 370 - .../x/net/html/testdata/webkit/entities01.dat | 603 - .../x/net/html/testdata/webkit/entities02.dat | 249 - .../html/testdata/webkit/html5test-com.dat | 246 - .../x/net/html/testdata/webkit/inbody01.dat | 43 - .../x/net/html/testdata/webkit/isindex.dat | 40 - ...pending-spec-changes-plain-text-unsafe.dat | Bin 115 -> 0 bytes .../testdata/webkit/pending-spec-changes.dat | 52 - .../testdata/webkit/plain-text-unsafe.dat | Bin 4166 -> 0 bytes .../net/html/testdata/webkit/scriptdata01.dat | 308 - .../testdata/webkit/scripted/adoption01.dat | 15 - .../testdata/webkit/scripted/webkit01.dat | 28 - .../x/net/html/testdata/webkit/tables01.dat | 212 - .../x/net/html/testdata/webkit/tests1.dat | 1952 -- .../x/net/html/testdata/webkit/tests10.dat | 799 - .../x/net/html/testdata/webkit/tests11.dat | 482 - .../x/net/html/testdata/webkit/tests12.dat | 62 - .../x/net/html/testdata/webkit/tests14.dat | 74 - .../x/net/html/testdata/webkit/tests15.dat | 208 - .../x/net/html/testdata/webkit/tests16.dat | 2299 --- .../x/net/html/testdata/webkit/tests17.dat | 153 - .../x/net/html/testdata/webkit/tests18.dat | 269 - .../x/net/html/testdata/webkit/tests19.dat | 1237 -- .../x/net/html/testdata/webkit/tests2.dat | 763 - .../x/net/html/testdata/webkit/tests20.dat | 455 - .../x/net/html/testdata/webkit/tests21.dat | 221 - .../x/net/html/testdata/webkit/tests22.dat | 157 - .../x/net/html/testdata/webkit/tests23.dat | 155 - .../x/net/html/testdata/webkit/tests24.dat | 79 - 
.../x/net/html/testdata/webkit/tests25.dat | 219 - .../x/net/html/testdata/webkit/tests26.dat | 313 - .../x/net/html/testdata/webkit/tests3.dat | 305 - .../x/net/html/testdata/webkit/tests4.dat | 59 - .../x/net/html/testdata/webkit/tests5.dat | 191 - .../x/net/html/testdata/webkit/tests6.dat | 663 - .../x/net/html/testdata/webkit/tests7.dat | 390 - .../x/net/html/testdata/webkit/tests8.dat | 148 - .../x/net/html/testdata/webkit/tests9.dat | 457 - .../testdata/webkit/tests_innerHTML_1.dat | 741 - .../x/net/html/testdata/webkit/tricky01.dat | 261 - .../x/net/html/testdata/webkit/webkit01.dat | 610 - .../x/net/html/testdata/webkit/webkit02.dat | 159 - vendor/golang.org/x/net/html/token.go | 1219 -- vendor/golang.org/x/net/html/token_test.go | 748 - vendor/golang.org/x/net/http2/.gitignore | 2 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - vendor/golang.org/x/net/http2/ciphers.go | 641 - vendor/golang.org/x/net/http2/ciphers_test.go | 309 - .../x/net/http2/client_conn_pool.go | 256 - .../x/net/http2/configure_transport.go | 80 - vendor/golang.org/x/net/http2/databuffer.go | 146 - .../golang.org/x/net/http2/databuffer_test.go | 157 - vendor/golang.org/x/net/http2/errors.go | 133 - vendor/golang.org/x/net/http2/errors_test.go | 24 - vendor/golang.org/x/net/http2/flow.go | 50 - vendor/golang.org/x/net/http2/flow_test.go | 53 - vendor/golang.org/x/net/http2/frame.go | 1579 -- vendor/golang.org/x/net/http2/frame_test.go | 1191 -- vendor/golang.org/x/net/http2/go16.go | 16 - vendor/golang.org/x/net/http2/go17.go | 106 - vendor/golang.org/x/net/http2/go17_not18.go | 36 - vendor/golang.org/x/net/http2/go18.go | 56 - vendor/golang.org/x/net/http2/go18_test.go | 79 - vendor/golang.org/x/net/http2/go19.go | 16 - vendor/golang.org/x/net/http2/go19_test.go | 60 - vendor/golang.org/x/net/http2/gotrack.go | 170 - vendor/golang.org/x/net/http2/gotrack_test.go | 33 - .../golang.org/x/net/http2/h2demo/.gitignore | 5 - vendor/golang.org/x/net/http2/h2demo/Makefile | 8 - vendor/golang.org/x/net/http2/h2demo/README | 16 - .../golang.org/x/net/http2/h2demo/h2demo.go | 538 - .../golang.org/x/net/http2/h2demo/launch.go | 302 - .../golang.org/x/net/http2/h2demo/rootCA.key | 27 - .../golang.org/x/net/http2/h2demo/rootCA.pem | 26 - .../golang.org/x/net/http2/h2demo/rootCA.srl | 1 - .../golang.org/x/net/http2/h2demo/server.crt | 20 - .../golang.org/x/net/http2/h2demo/server.key | 27 - vendor/golang.org/x/net/http2/h2demo/tmpl.go | 1991 -- vendor/golang.org/x/net/http2/h2i/README.md | 97 - vendor/golang.org/x/net/http2/h2i/h2i.go | 522 - vendor/golang.org/x/net/http2/headermap.go | 78 - vendor/golang.org/x/net/http2/hpack/encode.go | 240 - .../x/net/http2/hpack/encode_test.go | 386 - vendor/golang.org/x/net/http2/hpack/hpack.go | 490 - .../x/net/http2/hpack/hpack_test.go | 722 - .../golang.org/x/net/http2/hpack/huffman.go | 212 - vendor/golang.org/x/net/http2/hpack/tables.go | 479 - .../x/net/http2/hpack/tables_test.go | 214 - vendor/golang.org/x/net/http2/http2.go | 391 - vendor/golang.org/x/net/http2/http2_test.go | 199 - vendor/golang.org/x/net/http2/not_go16.go | 21 - vendor/golang.org/x/net/http2/not_go17.go | 87 - vendor/golang.org/x/net/http2/not_go18.go | 29 - vendor/golang.org/x/net/http2/not_go19.go | 16 - vendor/golang.org/x/net/http2/pipe.go | 163 - vendor/golang.org/x/net/http2/pipe_test.go | 130 - vendor/golang.org/x/net/http2/server.go | 2857 --- .../x/net/http2/server_push_test.go | 521 - 
vendor/golang.org/x/net/http2/server_test.go | 3721 ---- .../testdata/draft-ietf-httpbis-http2.xml | 5021 ----- vendor/golang.org/x/net/http2/transport.go | 2275 --- .../golang.org/x/net/http2/transport_test.go | 3686 ---- vendor/golang.org/x/net/http2/write.go | 370 - vendor/golang.org/x/net/http2/writesched.go | 242 - .../x/net/http2/writesched_priority.go | 452 - .../x/net/http2/writesched_priority_test.go | 541 - .../x/net/http2/writesched_random.go | 72 - .../x/net/http2/writesched_random_test.go | 44 - .../golang.org/x/net/http2/writesched_test.go | 125 - vendor/golang.org/x/net/http2/z_spec_test.go | 356 - vendor/golang.org/x/net/icmp/dstunreach.go | 41 - vendor/golang.org/x/net/icmp/echo.go | 45 - vendor/golang.org/x/net/icmp/endpoint.go | 113 - vendor/golang.org/x/net/icmp/example_test.go | 63 - vendor/golang.org/x/net/icmp/extension.go | 89 - .../golang.org/x/net/icmp/extension_test.go | 259 - vendor/golang.org/x/net/icmp/helper_posix.go | 75 - vendor/golang.org/x/net/icmp/interface.go | 236 - vendor/golang.org/x/net/icmp/ipv4.go | 61 - vendor/golang.org/x/net/icmp/ipv4_test.go | 83 - vendor/golang.org/x/net/icmp/ipv6.go | 23 - vendor/golang.org/x/net/icmp/listen_posix.go | 100 - vendor/golang.org/x/net/icmp/listen_stub.go | 33 - vendor/golang.org/x/net/icmp/message.go | 152 - vendor/golang.org/x/net/icmp/message_test.go | 134 - vendor/golang.org/x/net/icmp/messagebody.go | 41 - vendor/golang.org/x/net/icmp/mpls.go | 77 - vendor/golang.org/x/net/icmp/multipart.go | 109 - .../golang.org/x/net/icmp/multipart_test.go | 442 - vendor/golang.org/x/net/icmp/packettoobig.go | 43 - vendor/golang.org/x/net/icmp/paramprob.go | 63 - vendor/golang.org/x/net/icmp/ping_test.go | 200 - vendor/golang.org/x/net/icmp/sys_freebsd.go | 11 - vendor/golang.org/x/net/icmp/timeexceeded.go | 39 - vendor/golang.org/x/net/idna/example_test.go | 70 - vendor/golang.org/x/net/idna/idna.go | 680 - vendor/golang.org/x/net/idna/idna_test.go | 108 - vendor/golang.org/x/net/idna/punycode.go | 203 - vendor/golang.org/x/net/idna/punycode_test.go | 198 - vendor/golang.org/x/net/idna/tables.go | 4477 ----- vendor/golang.org/x/net/idna/trie.go | 72 - vendor/golang.org/x/net/idna/trieval.go | 114 - .../golang.org/x/net/internal/iana/const.go | 109 +- vendor/golang.org/x/net/internal/iana/gen.go | 293 - .../x/net/internal/nettest/helper_bsd.go | 53 - .../x/net/internal/nettest/helper_nobsd.go | 15 - .../x/net/internal/nettest/helper_posix.go | 31 - .../x/net/internal/nettest/helper_stub.go | 32 - .../x/net/internal/nettest/helper_unix.go | 29 - .../x/net/internal/nettest/helper_windows.go | 42 - .../x/net/internal/nettest/interface.go | 94 - .../x/net/internal/nettest/rlimit.go | 11 - .../x/net/internal/nettest/stack.go | 147 - .../x/net/internal/socket/defs_darwin.go | 44 - .../x/net/internal/socket/defs_dragonfly.go | 44 - .../x/net/internal/socket/defs_freebsd.go | 44 - .../x/net/internal/socket/defs_linux.go | 49 - .../x/net/internal/socket/defs_netbsd.go | 47 - .../x/net/internal/socket/defs_openbsd.go | 44 - .../x/net/internal/socket/defs_solaris.go | 44 - .../x/net/internal/socket/iovec_32bit.go | 6 +- .../x/net/internal/socket/iovec_64bit.go | 6 +- .../internal/socket/iovec_solaris_64bit.go | 6 +- .../x/net/internal/socket/msghdr_bsdvar.go | 6 +- .../net/internal/socket/msghdr_linux_32bit.go | 6 +- .../net/internal/socket/msghdr_linux_64bit.go | 6 +- .../x/net/internal/socket/msghdr_openbsd.go | 6 +- .../internal/socket/msghdr_solaris_64bit.go | 6 +- .../x/net/internal/socket/socket.go | 12 +- 
.../net/internal/socket/socket_go1_9_test.go | 256 - .../x/net/internal/socket/socket_test.go | 46 - .../x/net/internal/socket/sys_posix.go | 6 +- .../net/internal/socket/zsys_darwin_arm64.go | 61 + .../x/net/internal/socket/zsys_netbsd_arm.go | 6 + .../golang.org/x/net/internal/socks/client.go | 168 + .../golang.org/x/net/internal/socks/socks.go | 316 + .../x/net/internal/timeseries/timeseries.go | 525 - .../internal/timeseries/timeseries_test.go | 170 - vendor/golang.org/x/net/ipv4/batch.go | 9 +- vendor/golang.org/x/net/ipv4/bpf_test.go | 93 - vendor/golang.org/x/net/ipv4/control_test.go | 21 - vendor/golang.org/x/net/ipv4/defs_darwin.go | 77 - .../golang.org/x/net/ipv4/defs_dragonfly.go | 38 - vendor/golang.org/x/net/ipv4/defs_freebsd.go | 75 - vendor/golang.org/x/net/ipv4/defs_linux.go | 122 - vendor/golang.org/x/net/ipv4/defs_netbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_openbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_solaris.go | 84 - vendor/golang.org/x/net/ipv4/dgramopt.go | 31 +- vendor/golang.org/x/net/ipv4/doc.go | 4 +- vendor/golang.org/x/net/ipv4/endpoint.go | 21 +- vendor/golang.org/x/net/ipv4/example_test.go | 224 - vendor/golang.org/x/net/ipv4/gen.go | 199 - vendor/golang.org/x/net/ipv4/genericopt.go | 10 +- vendor/golang.org/x/net/ipv4/header.go | 17 +- vendor/golang.org/x/net/ipv4/header_test.go | 228 - vendor/golang.org/x/net/ipv4/helper.go | 1 + vendor/golang.org/x/net/ipv4/iana.go | 10 +- vendor/golang.org/x/net/ipv4/icmp_test.go | 95 - .../golang.org/x/net/ipv4/multicast_test.go | 334 - .../x/net/ipv4/multicastlistener_test.go | 265 - .../x/net/ipv4/multicastsockopt_test.go | 195 - vendor/golang.org/x/net/ipv4/packet.go | 5 +- vendor/golang.org/x/net/ipv4/payload_cmsg.go | 11 +- .../x/net/ipv4/payload_cmsg_go1_8.go | 2 +- .../x/net/ipv4/payload_cmsg_go1_9.go | 2 +- .../golang.org/x/net/ipv4/payload_nocmsg.go | 11 +- .../x/net/ipv4/readwrite_go1_8_test.go | 248 - .../x/net/ipv4/readwrite_go1_9_test.go | 388 - .../golang.org/x/net/ipv4/readwrite_test.go | 140 - vendor/golang.org/x/net/ipv4/unicast_test.go | 247 - .../x/net/ipv4/unicastsockopt_test.go | 148 - vendor/golang.org/x/net/ipv6/batch.go | 5 +- vendor/golang.org/x/net/ipv6/bpf_test.go | 96 - vendor/golang.org/x/net/ipv6/control_test.go | 21 - vendor/golang.org/x/net/ipv6/defs_darwin.go | 112 - .../golang.org/x/net/ipv6/defs_dragonfly.go | 84 - vendor/golang.org/x/net/ipv6/defs_freebsd.go | 105 - vendor/golang.org/x/net/ipv6/defs_linux.go | 147 - vendor/golang.org/x/net/ipv6/defs_netbsd.go | 80 - vendor/golang.org/x/net/ipv6/defs_openbsd.go | 89 - vendor/golang.org/x/net/ipv6/defs_solaris.go | 114 - vendor/golang.org/x/net/ipv6/dgramopt.go | 35 +- vendor/golang.org/x/net/ipv6/doc.go | 4 +- vendor/golang.org/x/net/ipv6/endpoint.go | 13 +- vendor/golang.org/x/net/ipv6/example_test.go | 216 - vendor/golang.org/x/net/ipv6/gen.go | 199 - vendor/golang.org/x/net/ipv6/genericopt.go | 10 +- vendor/golang.org/x/net/ipv6/header_test.go | 55 - vendor/golang.org/x/net/ipv6/helper.go | 1 + vendor/golang.org/x/net/ipv6/iana.go | 10 +- vendor/golang.org/x/net/ipv6/icmp_test.go | 96 - .../x/net/ipv6/mocktransponder_test.go | 32 - .../golang.org/x/net/ipv6/multicast_test.go | 264 - .../x/net/ipv6/multicastlistener_test.go | 261 - .../x/net/ipv6/multicastsockopt_test.go | 157 - vendor/golang.org/x/net/ipv6/payload_cmsg.go | 11 +- .../x/net/ipv6/payload_cmsg_go1_8.go | 2 +- .../x/net/ipv6/payload_cmsg_go1_9.go | 2 +- .../golang.org/x/net/ipv6/payload_nocmsg.go | 11 +- .../x/net/ipv6/readwrite_go1_8_test.go | 242 - 
.../x/net/ipv6/readwrite_go1_9_test.go | 373 - .../golang.org/x/net/ipv6/readwrite_test.go | 148 - vendor/golang.org/x/net/ipv6/sockopt_test.go | 133 - vendor/golang.org/x/net/ipv6/unicast_test.go | 184 - .../x/net/ipv6/unicastsockopt_test.go | 120 - .../golang.org/x/net/lex/httplex/httplex.go | 351 - .../x/net/lex/httplex/httplex_test.go | 119 - vendor/golang.org/x/net/lif/address.go | 105 - vendor/golang.org/x/net/lif/address_test.go | 123 - vendor/golang.org/x/net/lif/binary.go | 115 - vendor/golang.org/x/net/lif/defs_solaris.go | 90 - vendor/golang.org/x/net/lif/lif.go | 43 - vendor/golang.org/x/net/lif/link.go | 126 - vendor/golang.org/x/net/lif/link_test.go | 63 - vendor/golang.org/x/net/lif/sys.go | 21 - .../golang.org/x/net/lif/sys_solaris_amd64.s | 8 - vendor/golang.org/x/net/lif/syscall.go | 28 - .../x/net/lif/zsys_solaris_amd64.go | 103 - vendor/golang.org/x/net/nettest/conntest.go | 456 - .../golang.org/x/net/nettest/conntest_go16.go | 24 - .../golang.org/x/net/nettest/conntest_go17.go | 24 - .../golang.org/x/net/nettest/conntest_test.go | 76 - vendor/golang.org/x/net/netutil/listen.go | 48 - .../golang.org/x/net/netutil/listen_test.go | 101 - vendor/golang.org/x/net/proxy/per_host.go | 2 +- .../golang.org/x/net/proxy/per_host_test.go | 55 - vendor/golang.org/x/net/proxy/proxy_test.go | 215 - vendor/golang.org/x/net/proxy/socks5.go | 215 +- vendor/golang.org/x/net/publicsuffix/gen.go | 713 - vendor/golang.org/x/net/publicsuffix/list.go | 135 - .../x/net/publicsuffix/list_test.go | 416 - vendor/golang.org/x/net/publicsuffix/table.go | 9419 --------- .../x/net/publicsuffix/table_test.go | 16756 ---------------- vendor/golang.org/x/net/route/address.go | 425 - .../x/net/route/address_darwin_test.go | 63 - vendor/golang.org/x/net/route/address_test.go | 103 - vendor/golang.org/x/net/route/binary.go | 90 - vendor/golang.org/x/net/route/defs_darwin.go | 114 - .../golang.org/x/net/route/defs_dragonfly.go | 113 - vendor/golang.org/x/net/route/defs_freebsd.go | 337 - vendor/golang.org/x/net/route/defs_netbsd.go | 112 - vendor/golang.org/x/net/route/defs_openbsd.go | 116 - vendor/golang.org/x/net/route/interface.go | 64 - .../x/net/route/interface_announce.go | 32 - .../x/net/route/interface_classic.go | 66 - .../x/net/route/interface_freebsd.go | 78 - .../x/net/route/interface_multicast.go | 30 - .../x/net/route/interface_openbsd.go | 90 - vendor/golang.org/x/net/route/message.go | 72 - .../x/net/route/message_darwin_test.go | 34 - .../x/net/route/message_freebsd_test.go | 92 - vendor/golang.org/x/net/route/message_test.go | 239 - vendor/golang.org/x/net/route/route.go | 123 - .../golang.org/x/net/route/route_classic.go | 67 - .../golang.org/x/net/route/route_openbsd.go | 65 - vendor/golang.org/x/net/route/route_test.go | 390 - vendor/golang.org/x/net/route/sys.go | 39 - vendor/golang.org/x/net/route/sys_darwin.go | 87 - .../golang.org/x/net/route/sys_dragonfly.go | 76 - vendor/golang.org/x/net/route/sys_freebsd.go | 155 - vendor/golang.org/x/net/route/sys_netbsd.go | 71 - vendor/golang.org/x/net/route/sys_openbsd.go | 80 - vendor/golang.org/x/net/route/syscall.go | 28 - vendor/golang.org/x/net/route/zsys_darwin.go | 99 - .../golang.org/x/net/route/zsys_dragonfly.go | 98 - .../x/net/route/zsys_freebsd_386.go | 126 - .../x/net/route/zsys_freebsd_amd64.go | 123 - .../x/net/route/zsys_freebsd_arm.go | 123 - vendor/golang.org/x/net/route/zsys_netbsd.go | 97 - vendor/golang.org/x/net/route/zsys_openbsd.go | 101 - vendor/golang.org/x/net/trace/events.go | 532 - 
vendor/golang.org/x/net/trace/histogram.go | 365 - .../golang.org/x/net/trace/histogram_test.go | 325 - vendor/golang.org/x/net/trace/trace.go | 1082 - vendor/golang.org/x/net/trace/trace_go16.go | 21 - vendor/golang.org/x/net/trace/trace_go17.go | 21 - vendor/golang.org/x/net/trace/trace_test.go | 178 - vendor/golang.org/x/net/webdav/file.go | 796 - vendor/golang.org/x/net/webdav/file_go1.6.go | 17 - vendor/golang.org/x/net/webdav/file_go1.7.go | 16 - vendor/golang.org/x/net/webdav/file_test.go | 1184 -- vendor/golang.org/x/net/webdav/if.go | 173 - vendor/golang.org/x/net/webdav/if_test.go | 322 - .../x/net/webdav/internal/xml/README | 11 - .../x/net/webdav/internal/xml/atom_test.go | 56 - .../x/net/webdav/internal/xml/example_test.go | 151 - .../x/net/webdav/internal/xml/marshal.go | 1223 -- .../x/net/webdav/internal/xml/marshal_test.go | 1939 -- .../x/net/webdav/internal/xml/read.go | 692 - .../x/net/webdav/internal/xml/read_test.go | 744 - .../x/net/webdav/internal/xml/typeinfo.go | 371 - .../x/net/webdav/internal/xml/xml.go | 1998 -- .../x/net/webdav/internal/xml/xml_test.go | 752 - .../x/net/webdav/litmus_test_server.go | 94 - vendor/golang.org/x/net/webdav/lock.go | 445 - vendor/golang.org/x/net/webdav/lock_test.go | 731 - vendor/golang.org/x/net/webdav/prop.go | 418 - vendor/golang.org/x/net/webdav/prop_test.go | 613 - vendor/golang.org/x/net/webdav/webdav.go | 702 - vendor/golang.org/x/net/webdav/webdav_test.go | 344 - vendor/golang.org/x/net/webdav/xml.go | 519 - vendor/golang.org/x/net/webdav/xml_test.go | 906 - .../golang.org/x/net/websocket/dial_test.go | 43 - .../x/net/websocket/exampledial_test.go | 31 - .../x/net/websocket/examplehandler_test.go | 26 - .../golang.org/x/net/websocket/hybi_test.go | 608 - .../golang.org/x/net/websocket/websocket.go | 3 + .../x/net/websocket/websocket_test.go | 665 - vendor/golang.org/x/net/xsrftoken/xsrf.go | 94 - .../golang.org/x/net/xsrftoken/xsrf_test.go | 83 - vendor/modules.txt | 68 + 761 files changed, 15379 insertions(+), 177425 deletions(-) create mode 100644 .gitignore delete mode 100644 Gopkg.lock delete mode 100644 Gopkg.toml create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 vendor/code.linksmart.eu/com/go-sec/.gitignore delete mode 100644 vendor/code.linksmart.eu/com/go-sec/Gopkg.lock delete mode 100644 vendor/code.linksmart.eu/com/go-sec/Gopkg.toml delete mode 100644 vendor/code.linksmart.eu/com/go-sec/README.md delete mode 100644 vendor/code.linksmart.eu/com/go-sec/auth/README.md delete mode 100644 vendor/code.linksmart.eu/com/go-sec/authz/README.md delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/.gitignore delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/Dockerfile delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/Gopkg.lock delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/Gopkg.toml delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/README.md delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/apidoc/swagger.json delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/config.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/discovery/init.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/init.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/main.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/router.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/sample_conf/docker.json delete mode 100644 
vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog-auth.json delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog.json delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/catalog.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi_test.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/client.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/constants.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/controller.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/controller_test.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/doc.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/errors.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/gcpublisher.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/init.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/main_test.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/remoteclient.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/service/service-registrar.go delete mode 100644 vendor/code.linksmart.eu/sc/service-catalog/utils/init.go create mode 100644 vendor/github.com/ancientlore/go-avltree/go.mod delete mode 100644 vendor/github.com/ancientlore/go-avltree/objecttree_test.go delete mode 100644 vendor/github.com/ancientlore/go-avltree/pairtree_test.go delete mode 100644 vendor/github.com/ancientlore/go-avltree/stringtree_test.go delete mode 100644 vendor/github.com/ancientlore/go-avltree/tree_test.go delete mode 100644 vendor/github.com/codegangsta/negroni/logger_test.go delete mode 100644 vendor/github.com/codegangsta/negroni/negroni_test.go delete mode 100644 vendor/github.com/codegangsta/negroni/recovery_test.go delete mode 100644 vendor/github.com/codegangsta/negroni/response_writer_test.go delete mode 100644 vendor/github.com/codegangsta/negroni/static_test.go delete mode 100644 vendor/github.com/codegangsta/negroni/translations/README_de_de.md delete mode 100644 vendor/github.com/codegangsta/negroni/translations/README_pt_br.md delete mode 100644 vendor/github.com/codegangsta/negroni/translations/README_zh_cn.md delete mode 100644 vendor/github.com/codegangsta/negroni/translations/README_zh_tw.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/example_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/hmac_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/http_example_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/none_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/parser_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/request/doc.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/request/extractor.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/request/extractor_example_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/request/extractor_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/request/oauth2.go delete 
mode 100644 vendor/github.com/dgrijalva/jwt-go/request/request.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/request/request_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_test.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/helpers.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/sample_key delete mode 100644 vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/.gitignore create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/LICENSE create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/README.md create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/about.html create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/client.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/components.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/filestore.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/memstore.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/message.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/messageids.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/net.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/notice.html create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/oops.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/options.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go 
create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/ping.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/router.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/store.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/token.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/topic.go create mode 100644 vendor/github.com/eclipse/paho.mqtt.golang/trace.go create mode 100644 vendor/github.com/farshidtz/elog/.travis.yml create mode 100644 vendor/github.com/farshidtz/elog/LICENSE create mode 100644 vendor/github.com/farshidtz/elog/README.md create mode 100644 vendor/github.com/farshidtz/elog/config.go create mode 100644 vendor/github.com/farshidtz/elog/doc.go create mode 100644 vendor/github.com/farshidtz/elog/go.mod create mode 100644 vendor/github.com/farshidtz/elog/logger.go create mode 100644 vendor/github.com/farshidtz/elog/writer.go create mode 100644 vendor/github.com/farshidtz/mqtt-match/.travis.yml create mode 100644 vendor/github.com/farshidtz/mqtt-match/LICENSE create mode 100644 vendor/github.com/farshidtz/mqtt-match/README.md create mode 100644 vendor/github.com/farshidtz/mqtt-match/match.go delete mode 100644 vendor/github.com/golang/snappy/cmd/snappytool/main.cpp delete mode 100644 vendor/github.com/golang/snappy/golden_test.go delete mode 100644 vendor/github.com/golang/snappy/snappy_test.go delete mode 100644 vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt delete mode 100644 vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy delete mode 100644 vendor/github.com/gorilla/context/context_test.go delete mode 100644 vendor/github.com/gorilla/mux/bench_test.go delete mode 100644 vendor/github.com/gorilla/mux/context_gorilla_test.go delete mode 100644 vendor/github.com/gorilla/mux/context_native_test.go delete mode 100644 vendor/github.com/gorilla/mux/mux_test.go delete mode 100644 vendor/github.com/gorilla/mux/old_test.go delete mode 100644 vendor/github.com/justinas/alice/chain_test.go create mode 100644 vendor/github.com/linksmart/go-sec/LICENSE create mode 100644 vendor/github.com/linksmart/go-sec/NOTICE rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/auth/keycloak/obtainer/obtainer.go (97%) rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/auth/keycloak/validator/validator.go (98%) rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/auth/obtainer/client.go (100%) rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/auth/obtainer/obtainer.go (100%) rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/auth/validator/handler.go (98%) rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/auth/validator/validator.go (98%) rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/authz/authz.go (100%) rename vendor/{code.linksmart.eu/com => github.com/linksmart}/go-sec/authz/config.go (100%) rename vendor/{code.linksmart.eu/com/go-sec => github.com/linksmart/service-catalog}/LICENSE (100%) rename vendor/{code.linksmart.eu/com/go-sec => github.com/linksmart/service-catalog}/NOTICE (100%) rename vendor/{code.linksmart.eu/sc => github.com/linksmart}/service-catalog/discovery/README.md (100%) rename vendor/{code.linksmart.eu/sc => github.com/linksmart}/service-catalog/discovery/catalog.go (100%) create mode 100644 vendor/github.com/linksmart/service-catalog/discovery/init.go 
rename vendor/{code.linksmart.eu/sc => github.com/linksmart}/service-catalog/discovery/utils.go (87%) rename vendor/{code.linksmart.eu/sc/service-catalog => github.com/linksmart/service-catalog/v2}/LICENSE (100%) rename vendor/{code.linksmart.eu/sc/service-catalog => github.com/linksmart/service-catalog/v2}/NOTICE (100%) create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/catalog.go create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/constants.go create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/controller.go create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/errors.go create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/http.go create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/init.go rename vendor/{code.linksmart.eu/sc/service-catalog/service => github.com/linksmart/service-catalog/v2/catalog}/ldbstorage.go (78%) rename vendor/{code.linksmart.eu/sc/service-catalog/service => github.com/linksmart/service-catalog/v2/catalog}/memstorage.go (78%) create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt.go create mode 100644 vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt_conf.go rename vendor/{code.linksmart.eu/sc/service-catalog => github.com/linksmart/service-catalog/v2}/utils/http.go (97%) create mode 100644 vendor/github.com/linksmart/service-catalog/v2/utils/init.go rename vendor/{code.linksmart.eu/sc/service-catalog => github.com/linksmart/service-catalog/v2}/utils/pagination.go (100%) rename vendor/{code.linksmart.eu/sc/service-catalog => github.com/linksmart/service-catalog/v2}/utils/pathfilter.go (100%) delete mode 100644 vendor/github.com/miekg/dns/client_test.go delete mode 100644 vendor/github.com/miekg/dns/clientconfig_test.go delete mode 100644 vendor/github.com/miekg/dns/compress_generate.go delete mode 100644 vendor/github.com/miekg/dns/dns_bench_test.go delete mode 100644 vendor/github.com/miekg/dns/dns_test.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_test.go delete mode 100644 vendor/github.com/miekg/dns/dnsutil/util.go delete mode 100644 vendor/github.com/miekg/dns/dnsutil/util_test.go delete mode 100644 vendor/github.com/miekg/dns/dyn_test.go delete mode 100644 vendor/github.com/miekg/dns/edns_test.go delete mode 100644 vendor/github.com/miekg/dns/example_test.go delete mode 100644 vendor/github.com/miekg/dns/fuzz_test.go delete mode 100644 vendor/github.com/miekg/dns/idn/code_points.go delete mode 100644 vendor/github.com/miekg/dns/idn/example_test.go delete mode 100644 vendor/github.com/miekg/dns/idn/punycode.go delete mode 100644 vendor/github.com/miekg/dns/idn/punycode_test.go delete mode 100644 vendor/github.com/miekg/dns/issue_test.go delete mode 100644 vendor/github.com/miekg/dns/labels_test.go delete mode 100644 vendor/github.com/miekg/dns/msg_generate.go delete mode 100644 vendor/github.com/miekg/dns/msg_test.go delete mode 100644 vendor/github.com/miekg/dns/nsecx_test.go delete mode 100644 vendor/github.com/miekg/dns/parse_test.go delete mode 100644 vendor/github.com/miekg/dns/privaterr_test.go delete mode 100644 vendor/github.com/miekg/dns/remote_test.go delete mode 100644 vendor/github.com/miekg/dns/sanitize_test.go delete mode 100644 vendor/github.com/miekg/dns/scan_test.go delete mode 100644 vendor/github.com/miekg/dns/server_test.go delete mode 100644 vendor/github.com/miekg/dns/sig0_test.go delete mode 100644 vendor/github.com/miekg/dns/tsig_test.go delete mode 100644 
vendor/github.com/miekg/dns/types_generate.go delete mode 100644 vendor/github.com/miekg/dns/types_test.go delete mode 100644 vendor/github.com/miekg/dns/update_test.go delete mode 100644 vendor/github.com/miekg/dns/xfr_test.go delete mode 100644 vendor/github.com/pborman/uuid/marshal_test.go delete mode 100644 vendor/github.com/pborman/uuid/seq_test.go delete mode 100644 vendor/github.com/pborman/uuid/sql_test.go delete mode 100644 vendor/github.com/pborman/uuid/uuid_test.go create mode 100644 vendor/github.com/satori/go.uuid/.travis.yml create mode 100644 vendor/github.com/satori/go.uuid/LICENSE create mode 100644 vendor/github.com/satori/go.uuid/README.md create mode 100644 vendor/github.com/satori/go.uuid/codec.go create mode 100644 vendor/github.com/satori/go.uuid/generator.go create mode 100644 vendor/github.com/satori/go.uuid/sql.go create mode 100644 vendor/github.com/satori/go.uuid/uuid.go delete mode 100644 vendor/github.com/syndtr/goleveldb/.travis.yml delete mode 100644 vendor/github.com/syndtr/goleveldb/README.md delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/external_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/key_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil/db.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil/iter.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil/kv.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go delete mode 100644 
vendor/github.com/syndtr/goleveldb/leveldb/testutil/storage.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil/util.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/version_test.go delete mode 100644 vendor/github.com/syndtr/goleveldb/manualtest/dbstress/key.go delete mode 100644 vendor/github.com/syndtr/goleveldb/manualtest/dbstress/main.go delete mode 100644 vendor/github.com/syndtr/goleveldb/manualtest/filelock/main.go create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go create mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/.gitignore create mode 100644 vendor/github.com/xeipuuv/gojsonschema/.travis.yml create mode 100644 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonschema/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonschema/draft.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/errors.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/glide.yaml create mode 100644 vendor/github.com/xeipuuv/gojsonschema/go.mod create mode 100644 vendor/github.com/xeipuuv/gojsonschema/go.sum create mode 100644 vendor/github.com/xeipuuv/gojsonschema/internalLog.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/locales.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/result.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaPool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaType.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/subSchema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/types.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/utils.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/validation.go delete mode 100644 vendor/golang.org/x/net/.gitattributes delete mode 100644 vendor/golang.org/x/net/.gitignore delete mode 100644 vendor/golang.org/x/net/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/net/README delete mode 100644 vendor/golang.org/x/net/bpf/instructions_test.go delete mode 100644 vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf delete mode 100644 vendor/golang.org/x/net/bpf/testdata/all_instructions.txt delete mode 100644 vendor/golang.org/x/net/bpf/vm_aluop_test.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_bpf_test.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_extension_test.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_jump_test.go 
delete mode 100644 vendor/golang.org/x/net/bpf/vm_load_test.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_ret_test.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_scratch_test.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_test.go delete mode 100644 vendor/golang.org/x/net/codereview.cfg delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/net/context/context_test.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 vendor/golang.org/x/net/context/withtimeout_test.go delete mode 100644 vendor/golang.org/x/net/dict/dict.go delete mode 100644 vendor/golang.org/x/net/dns/dnsmessage/example_test.go delete mode 100644 vendor/golang.org/x/net/dns/dnsmessage/message.go delete mode 100644 vendor/golang.org/x/net/dns/dnsmessage/message_test.go delete mode 100644 vendor/golang.org/x/net/html/atom/atom.go delete mode 100644 vendor/golang.org/x/net/html/atom/atom_test.go delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go delete mode 100644 vendor/golang.org/x/net/html/atom/table.go delete mode 100644 vendor/golang.org/x/net/html/atom/table_test.go delete mode 100644 vendor/golang.org/x/net/html/charset/charset.go delete mode 100644 vendor/golang.org/x/net/html/charset/charset_test.go delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/README delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html delete mode 100644 vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html delete mode 100644 vendor/golang.org/x/net/html/const.go delete mode 100644 vendor/golang.org/x/net/html/doc.go delete mode 100644 vendor/golang.org/x/net/html/doctype.go delete mode 100644 vendor/golang.org/x/net/html/entity.go delete mode 100644 vendor/golang.org/x/net/html/entity_test.go delete mode 100644 vendor/golang.org/x/net/html/escape.go delete mode 100644 vendor/golang.org/x/net/html/escape_test.go delete mode 100644 vendor/golang.org/x/net/html/example_test.go delete mode 100644 vendor/golang.org/x/net/html/foreign.go delete mode 100644 vendor/golang.org/x/net/html/node.go delete mode 100644 
vendor/golang.org/x/net/html/node_test.go delete mode 100644 vendor/golang.org/x/net/html/parse.go delete mode 100644 vendor/golang.org/x/net/html/parse_test.go delete mode 100644 vendor/golang.org/x/net/html/render.go delete mode 100644 vendor/golang.org/x/net/html/render_test.go delete mode 100644 vendor/golang.org/x/net/html/testdata/go1.html delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/README delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/adoption01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/adoption02.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/comments01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/doctype01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/entities01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/entities02.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/html5test-com.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/inbody01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/isindex.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scriptdata01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tables01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests1.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests10.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests11.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests12.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests14.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests15.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests16.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests17.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests18.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests19.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests2.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests20.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests21.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests22.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests23.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests24.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests25.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests26.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests3.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests4.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests5.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests6.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests7.dat delete mode 100644 
vendor/golang.org/x/net/html/testdata/webkit/tests8.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests9.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/tricky01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/webkit01.dat delete mode 100644 vendor/golang.org/x/net/html/testdata/webkit/webkit02.dat delete mode 100644 vendor/golang.org/x/net/html/token.go delete mode 100644 vendor/golang.org/x/net/html/token_test.go delete mode 100644 vendor/golang.org/x/net/http2/.gitignore delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/Makefile delete mode 100644 vendor/golang.org/x/net/http2/README delete mode 100644 vendor/golang.org/x/net/http2/ciphers.go delete mode 100644 vendor/golang.org/x/net/http2/ciphers_test.go delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go delete mode 100644 vendor/golang.org/x/net/http2/configure_transport.go delete mode 100644 vendor/golang.org/x/net/http2/databuffer.go delete mode 100644 vendor/golang.org/x/net/http2/databuffer_test.go delete mode 100644 vendor/golang.org/x/net/http2/errors.go delete mode 100644 vendor/golang.org/x/net/http2/errors_test.go delete mode 100644 vendor/golang.org/x/net/http2/flow.go delete mode 100644 vendor/golang.org/x/net/http2/flow_test.go delete mode 100644 vendor/golang.org/x/net/http2/frame.go delete mode 100644 vendor/golang.org/x/net/http2/frame_test.go delete mode 100644 vendor/golang.org/x/net/http2/go16.go delete mode 100644 vendor/golang.org/x/net/http2/go17.go delete mode 100644 vendor/golang.org/x/net/http2/go17_not18.go delete mode 100644 vendor/golang.org/x/net/http2/go18.go delete mode 100644 vendor/golang.org/x/net/http2/go18_test.go delete mode 100644 vendor/golang.org/x/net/http2/go19.go delete mode 100644 vendor/golang.org/x/net/http2/go19_test.go delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go delete mode 100644 vendor/golang.org/x/net/http2/gotrack_test.go delete mode 100644 vendor/golang.org/x/net/http2/h2demo/.gitignore delete mode 100644 vendor/golang.org/x/net/http2/h2demo/Makefile delete mode 100644 vendor/golang.org/x/net/http2/h2demo/README delete mode 100644 vendor/golang.org/x/net/http2/h2demo/h2demo.go delete mode 100644 vendor/golang.org/x/net/http2/h2demo/launch.go delete mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.key delete mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.pem delete mode 100644 vendor/golang.org/x/net/http2/h2demo/rootCA.srl delete mode 100644 vendor/golang.org/x/net/http2/h2demo/server.crt delete mode 100644 vendor/golang.org/x/net/http2/h2demo/server.key delete mode 100644 vendor/golang.org/x/net/http2/h2demo/tmpl.go delete mode 100644 vendor/golang.org/x/net/http2/h2i/README.md delete mode 100644 vendor/golang.org/x/net/http2/h2i/h2i.go delete mode 100644 vendor/golang.org/x/net/http2/headermap.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode_test.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack_test.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables_test.go delete mode 100644 vendor/golang.org/x/net/http2/http2.go delete mode 
100644 vendor/golang.org/x/net/http2/http2_test.go delete mode 100644 vendor/golang.org/x/net/http2/not_go16.go delete mode 100644 vendor/golang.org/x/net/http2/not_go17.go delete mode 100644 vendor/golang.org/x/net/http2/not_go18.go delete mode 100644 vendor/golang.org/x/net/http2/not_go19.go delete mode 100644 vendor/golang.org/x/net/http2/pipe.go delete mode 100644 vendor/golang.org/x/net/http2/pipe_test.go delete mode 100644 vendor/golang.org/x/net/http2/server.go delete mode 100644 vendor/golang.org/x/net/http2/server_push_test.go delete mode 100644 vendor/golang.org/x/net/http2/server_test.go delete mode 100644 vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml delete mode 100644 vendor/golang.org/x/net/http2/transport.go delete mode 100644 vendor/golang.org/x/net/http2/transport_test.go delete mode 100644 vendor/golang.org/x/net/http2/write.go delete mode 100644 vendor/golang.org/x/net/http2/writesched.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority_test.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_random_test.go delete mode 100644 vendor/golang.org/x/net/http2/writesched_test.go delete mode 100644 vendor/golang.org/x/net/http2/z_spec_test.go delete mode 100644 vendor/golang.org/x/net/icmp/dstunreach.go delete mode 100644 vendor/golang.org/x/net/icmp/echo.go delete mode 100644 vendor/golang.org/x/net/icmp/endpoint.go delete mode 100644 vendor/golang.org/x/net/icmp/example_test.go delete mode 100644 vendor/golang.org/x/net/icmp/extension.go delete mode 100644 vendor/golang.org/x/net/icmp/extension_test.go delete mode 100644 vendor/golang.org/x/net/icmp/helper_posix.go delete mode 100644 vendor/golang.org/x/net/icmp/interface.go delete mode 100644 vendor/golang.org/x/net/icmp/ipv4.go delete mode 100644 vendor/golang.org/x/net/icmp/ipv4_test.go delete mode 100644 vendor/golang.org/x/net/icmp/ipv6.go delete mode 100644 vendor/golang.org/x/net/icmp/listen_posix.go delete mode 100644 vendor/golang.org/x/net/icmp/listen_stub.go delete mode 100644 vendor/golang.org/x/net/icmp/message.go delete mode 100644 vendor/golang.org/x/net/icmp/message_test.go delete mode 100644 vendor/golang.org/x/net/icmp/messagebody.go delete mode 100644 vendor/golang.org/x/net/icmp/mpls.go delete mode 100644 vendor/golang.org/x/net/icmp/multipart.go delete mode 100644 vendor/golang.org/x/net/icmp/multipart_test.go delete mode 100644 vendor/golang.org/x/net/icmp/packettoobig.go delete mode 100644 vendor/golang.org/x/net/icmp/paramprob.go delete mode 100644 vendor/golang.org/x/net/icmp/ping_test.go delete mode 100644 vendor/golang.org/x/net/icmp/sys_freebsd.go delete mode 100644 vendor/golang.org/x/net/icmp/timeexceeded.go delete mode 100644 vendor/golang.org/x/net/idna/example_test.go delete mode 100644 vendor/golang.org/x/net/idna/idna.go delete mode 100644 vendor/golang.org/x/net/idna/idna_test.go delete mode 100644 vendor/golang.org/x/net/idna/punycode.go delete mode 100644 vendor/golang.org/x/net/idna/punycode_test.go delete mode 100644 vendor/golang.org/x/net/idna/tables.go delete mode 100644 vendor/golang.org/x/net/idna/trie.go delete mode 100644 vendor/golang.org/x/net/idna/trieval.go delete mode 100644 vendor/golang.org/x/net/internal/iana/gen.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/helper_bsd.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/helper_nobsd.go delete mode 
100644 vendor/golang.org/x/net/internal/nettest/helper_posix.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/helper_stub.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/helper_unix.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/helper_windows.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/interface.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/rlimit.go delete mode 100644 vendor/golang.org/x/net/internal/nettest/stack.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go delete mode 100644 vendor/golang.org/x/net/internal/socket/socket_test.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/bpf_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv4/example_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/gen.go delete mode 100644 vendor/golang.org/x/net/ipv4/header_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/icmp_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/multicast_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/multicastlistener_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/multicastsockopt_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/readwrite_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/unicast_test.go delete mode 100644 vendor/golang.org/x/net/ipv4/unicastsockopt_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/bpf_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go delete mode 100644 
vendor/golang.org/x/net/ipv6/example_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/gen.go delete mode 100644 vendor/golang.org/x/net/ipv6/header_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/mocktransponder_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/multicast_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/multicastlistener_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/multicastsockopt_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/readwrite_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/sockopt_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/unicast_test.go delete mode 100644 vendor/golang.org/x/net/ipv6/unicastsockopt_test.go delete mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go delete mode 100644 vendor/golang.org/x/net/lex/httplex/httplex_test.go delete mode 100644 vendor/golang.org/x/net/lif/address.go delete mode 100644 vendor/golang.org/x/net/lif/address_test.go delete mode 100644 vendor/golang.org/x/net/lif/binary.go delete mode 100644 vendor/golang.org/x/net/lif/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/lif/lif.go delete mode 100644 vendor/golang.org/x/net/lif/link.go delete mode 100644 vendor/golang.org/x/net/lif/link_test.go delete mode 100644 vendor/golang.org/x/net/lif/sys.go delete mode 100644 vendor/golang.org/x/net/lif/sys_solaris_amd64.s delete mode 100644 vendor/golang.org/x/net/lif/syscall.go delete mode 100644 vendor/golang.org/x/net/lif/zsys_solaris_amd64.go delete mode 100644 vendor/golang.org/x/net/nettest/conntest.go delete mode 100644 vendor/golang.org/x/net/nettest/conntest_go16.go delete mode 100644 vendor/golang.org/x/net/nettest/conntest_go17.go delete mode 100644 vendor/golang.org/x/net/nettest/conntest_test.go delete mode 100644 vendor/golang.org/x/net/netutil/listen.go delete mode 100644 vendor/golang.org/x/net/netutil/listen_test.go delete mode 100644 vendor/golang.org/x/net/proxy/per_host_test.go delete mode 100644 vendor/golang.org/x/net/proxy/proxy_test.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/list.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/list_test.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/table.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/table_test.go delete mode 100644 vendor/golang.org/x/net/route/address.go delete mode 100644 vendor/golang.org/x/net/route/address_darwin_test.go delete mode 100644 vendor/golang.org/x/net/route/address_test.go delete mode 100644 vendor/golang.org/x/net/route/binary.go delete mode 100644 vendor/golang.org/x/net/route/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/route/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/route/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/route/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/route/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/route/interface.go delete mode 100644 vendor/golang.org/x/net/route/interface_announce.go delete mode 100644 vendor/golang.org/x/net/route/interface_classic.go delete mode 100644 vendor/golang.org/x/net/route/interface_freebsd.go delete mode 100644 vendor/golang.org/x/net/route/interface_multicast.go delete mode 100644 
vendor/golang.org/x/net/route/interface_openbsd.go delete mode 100644 vendor/golang.org/x/net/route/message.go delete mode 100644 vendor/golang.org/x/net/route/message_darwin_test.go delete mode 100644 vendor/golang.org/x/net/route/message_freebsd_test.go delete mode 100644 vendor/golang.org/x/net/route/message_test.go delete mode 100644 vendor/golang.org/x/net/route/route.go delete mode 100644 vendor/golang.org/x/net/route/route_classic.go delete mode 100644 vendor/golang.org/x/net/route/route_openbsd.go delete mode 100644 vendor/golang.org/x/net/route/route_test.go delete mode 100644 vendor/golang.org/x/net/route/sys.go delete mode 100644 vendor/golang.org/x/net/route/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/route/sys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/route/sys_freebsd.go delete mode 100644 vendor/golang.org/x/net/route/sys_netbsd.go delete mode 100644 vendor/golang.org/x/net/route/sys_openbsd.go delete mode 100644 vendor/golang.org/x/net/route/syscall.go delete mode 100644 vendor/golang.org/x/net/route/zsys_darwin.go delete mode 100644 vendor/golang.org/x/net/route/zsys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_386.go delete mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/net/route/zsys_freebsd_arm.go delete mode 100644 vendor/golang.org/x/net/route/zsys_netbsd.go delete mode 100644 vendor/golang.org/x/net/route/zsys_openbsd.go delete mode 100644 vendor/golang.org/x/net/trace/events.go delete mode 100644 vendor/golang.org/x/net/trace/histogram.go delete mode 100644 vendor/golang.org/x/net/trace/histogram_test.go delete mode 100644 vendor/golang.org/x/net/trace/trace.go delete mode 100644 vendor/golang.org/x/net/trace/trace_go16.go delete mode 100644 vendor/golang.org/x/net/trace/trace_go17.go delete mode 100644 vendor/golang.org/x/net/trace/trace_test.go delete mode 100644 vendor/golang.org/x/net/webdav/file.go delete mode 100644 vendor/golang.org/x/net/webdav/file_go1.6.go delete mode 100644 vendor/golang.org/x/net/webdav/file_go1.7.go delete mode 100644 vendor/golang.org/x/net/webdav/file_test.go delete mode 100644 vendor/golang.org/x/net/webdav/if.go delete mode 100644 vendor/golang.org/x/net/webdav/if_test.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/README delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/atom_test.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/example_test.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/marshal.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/read.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/read_test.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/xml.go delete mode 100644 vendor/golang.org/x/net/webdav/internal/xml/xml_test.go delete mode 100644 vendor/golang.org/x/net/webdav/litmus_test_server.go delete mode 100644 vendor/golang.org/x/net/webdav/lock.go delete mode 100644 vendor/golang.org/x/net/webdav/lock_test.go delete mode 100644 vendor/golang.org/x/net/webdav/prop.go delete mode 100644 vendor/golang.org/x/net/webdav/prop_test.go delete mode 100644 vendor/golang.org/x/net/webdav/webdav.go delete mode 100644 vendor/golang.org/x/net/webdav/webdav_test.go delete mode 100644 vendor/golang.org/x/net/webdav/xml.go delete mode 100644 
vendor/golang.org/x/net/webdav/xml_test.go delete mode 100644 vendor/golang.org/x/net/websocket/dial_test.go delete mode 100644 vendor/golang.org/x/net/websocket/exampledial_test.go delete mode 100644 vendor/golang.org/x/net/websocket/examplehandler_test.go delete mode 100644 vendor/golang.org/x/net/websocket/hybi_test.go delete mode 100644 vendor/golang.org/x/net/websocket/websocket_test.go delete mode 100644 vendor/golang.org/x/net/xsrftoken/xsrf.go delete mode 100644 vendor/golang.org/x/net/xsrftoken/xsrf_test.go create mode 100644 vendor/modules.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..3029552b --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +bin/ +/pkg/ diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 5d96f84d..00000000 --- a/Gopkg.lock +++ /dev/null @@ -1,93 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "code.linksmart.eu/com/go-sec" - packages = ["auth/keycloak/obtainer","auth/keycloak/validator","auth/obtainer","auth/validator","authz"] - revision = "5abeee54723444678d878c5bb556e5c4218f69ce" - -[[projects]] - branch = "master" - name = "code.linksmart.eu/sc/service-catalog" - packages = ["discovery","service","utils"] - revision = "3b17571715d89b57e486747fb3b6ae28edef9a5d" - -[[projects]] - branch = "master" - name = "github.com/ancientlore/go-avltree" - packages = ["."] - revision = "4ba4b949e04ae520ba87de82bb82d8f3a6be7e11" - -[[projects]] - name = "github.com/codegangsta/negroni" - packages = ["."] - revision = "fde5e16d32adc7ad637e9cd9ad21d4ebc6192535" - version = "v0.2.0" - -[[projects]] - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" - version = "v3.0.0" - -[[projects]] - branch = "master" - name = "github.com/golang/snappy" - packages = ["."] - revision = "553a641470496b2327abcac10b36396bd98e45c9" - -[[projects]] - name = "github.com/gorilla/context" - packages = ["."] - revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" - version = "v1.1" - -[[projects]] - name = "github.com/gorilla/mux" - packages = ["."] - revision = "24fca303ac6da784b9e8269f724ddeb0b2eea5e7" - version = "v1.5.0" - -[[projects]] - name = "github.com/justinas/alice" - packages = ["."] - revision = "052b8b6c18edb9db317af86806d8e00ebaa94160" - version = "1.0.0" - -[[projects]] - branch = "master" - name = "github.com/miekg/dns" - packages = ["."] - revision = "e4205768578dc90c2669e75a2f8a8bf77e3083a4" - -[[projects]] - branch = "master" - name = "github.com/oleksandr/bonjour" - packages = ["."] - revision = "5dcf00d8b228be86307f952f550f2191d956b9e2" - -[[projects]] - name = "github.com/pborman/uuid" - packages = ["."] - revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" - version = "v1.1" - -[[projects]] - branch = "master" - name = "github.com/syndtr/goleveldb" - packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"] - revision = "b89cc31ef7977104127d34c1bd31ebd1a9db2199" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"] - revision = "8351a756f30f1297fe94bbf4b767ec589c6ea6d0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "cd6aa8e87c2ce31e0b8c24151f66bb508e3c3f53de1f3db00d35730d71af4065" - solver-name = "gps-cdcl" - 
solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index c2d8bf78..00000000 --- a/Gopkg.toml +++ /dev/null @@ -1,62 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "code.linksmart.eu/com/go-sec" - -[[constraint]] - branch = "master" - name = "code.linksmart.eu/sc/service-catalog" - -[[constraint]] - branch = "master" - name = "github.com/ancientlore/go-avltree" - -[[constraint]] - name = "github.com/codegangsta/negroni" - version = "0.2.0" - -[[constraint]] - name = "github.com/gorilla/context" - version = "1.1.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "1.5.0" - -[[constraint]] - name = "github.com/justinas/alice" - version = "1.0.0" - -[[constraint]] - branch = "master" - name = "github.com/oleksandr/bonjour" - -[[constraint]] - name = "github.com/pborman/uuid" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/syndtr/goleveldb" diff --git a/catalog/http.go b/catalog/http.go index 0a58ce68..2e683e0e 100644 --- a/catalog/http.go +++ b/catalog/http.go @@ -4,14 +4,13 @@ package catalog import ( "errors" + "fmt" "io" "net/http" "path/filepath" "strings" - "fmt" - - "code.linksmart.eu/com/go-sec/auth/obtainer" + "github.com/linksmart/go-sec/auth/obtainer" ) // Serves static and all /static/ctx files as ld+json diff --git a/catalog/remoteclient.go b/catalog/remoteclient.go index 572d1cbe..5ec50706 100644 --- a/catalog/remoteclient.go +++ b/catalog/remoteclient.go @@ -10,7 +10,7 @@ import ( "net/url" "strings" - "code.linksmart.eu/com/go-sec/auth/obtainer" + "github.com/linksmart/go-sec/auth/obtainer" ) type RemoteCatalogClient struct { diff --git a/catalog/td_schema.go b/catalog/td_schema.go index bbe0d9a7..802c763a 100644 --- a/catalog/td_schema.go +++ b/catalog/td_schema.go @@ -1,4 +1,4 @@ -package thing_description +package catalog //Schema for the const WoTSchema = ` diff --git a/catalog/utils.go b/catalog/utils.go index 23328f7f..a83a38c6 100644 --- a/catalog/utils.go +++ b/catalog/utils.go @@ -7,8 +7,8 @@ import ( "sync" "time" - "code.linksmart.eu/com/go-sec/auth/obtainer" - "code.linksmart.eu/sc/service-catalog/discovery" + "github.com/linksmart/go-sec/auth/obtainer" + "github.com/linksmart/service-catalog/discovery" ) const ( diff --git a/catalog/wot_thing_description.go b/catalog/wot_thing_description.go index d4c57043..b70e76de 100644 --- a/catalog/wot_thing_description.go +++ b/catalog/wot_thing_description.go @@ -1,7 +1,10 @@ -package thing_description +package catalog import ( + "fmt" + "strings" "time" + "github.com/xeipuuv/gojsonschema" ) diff --git a/config.go b/config.go index 289c5185..2abcbec1 100644 --- a/config.go +++ b/config.go @@ -10,8 +10,8 @@ import ( "net/url" "strings" - "code.linksmart.eu/com/go-sec/authz" - utils "code.linksmart.eu/rc/resource-catalog/catalog" + "github.com/linksmart/go-sec/authz" + utils "github.com/linksmart/resource-catalog/catalog" ) type Config struct { diff --git a/go.mod b/go.mod new file mode 100644 index 
00000000..fa6d6c72 --- /dev/null +++ b/go.mod @@ -0,0 +1,23 @@ +module github.com/linksmart/resource-catalog + +go 1.13 + +require ( + github.com/ancientlore/go-avltree v0.0.0-20180928192319-da83409fdfb7 + github.com/codegangsta/negroni v0.2.0 + github.com/eclipse/paho.mqtt.golang v1.2.0 // indirect + github.com/gorilla/context v1.1.1 + github.com/gorilla/mux v1.5.0 + github.com/justinas/alice v0.0.0-20160512134231-052b8b6c18ed + github.com/linksmart/go-sec v1.0.1 + github.com/linksmart/service-catalog v2.3.4+incompatible + github.com/linksmart/service-catalog/v2 v2.4.1 + github.com/oleksandr/bonjour v0.0.0-20160508152359-5dcf00d8b228 + github.com/onsi/ginkgo v1.12.0 // indirect + github.com/onsi/gomega v1.9.0 // indirect + github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c + github.com/satori/go.uuid v1.2.0 // indirect + github.com/stretchr/testify v1.5.1 // indirect + github.com/syndtr/goleveldb v1.0.0 + github.com/xeipuuv/gojsonschema v1.2.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..7b90c803 --- /dev/null +++ b/go.sum @@ -0,0 +1,89 @@ +github.com/ancientlore/go-avltree v0.0.0-20180928192319-da83409fdfb7 h1:oIquyipmpCIO2I2+4MrsZ+PgGYUeR0HpIUrQxrrKiRU= +github.com/ancientlore/go-avltree v0.0.0-20180928192319-da83409fdfb7/go.mod h1:nfJ32Li6TWi3iVi9M3XF19FNqdfTlmoI87CPVgtgqoc= +github.com/codegangsta/negroni v0.2.0 h1:MqNkcR831ZSHwZmP9IdlZWGMxc1T8briBhBW649DkXs= +github.com/codegangsta/negroni v0.2.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.0.0+incompatible h1:nfVqwkkhaRUethVJaQf5TUFdFr3YUF4lJBTf/F2XwVI= +github.com/dgrijalva/jwt-go v3.0.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/eclipse/paho.mqtt.golang v1.1.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/farshidtz/elog v1.0.1 h1:GXdAmbHVyJ5l6V37gLK+Pedjd/sMRt0RPLzB/HcVFj0= +github.com/farshidtz/elog v1.0.1/go.mod h1:OXTASC4gfW1KTSCg/qieXdyo9SoUA+c4U8qGkp6DW9I= +github.com/farshidtz/mqtt-match v1.0.1 h1:VEBojQL9P5F7E3gu9XULIUZnzZknn7byF2rJ7RX20cQ= +github.com/farshidtz/mqtt-match v1.0.1/go.mod h1:Kwf4JfzMhR3aPmVY5jqTkLXY/E3DgHsRlCUekyqdQHw= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.4.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.5.0 h1:mq8bRov+5x+pZNR/uAHyUEgovR9gLgYFwDQIeuYi9TM= +github.com/gorilla/mux v1.5.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/hpcloud/tail 
v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/justinas/alice v0.0.0-20160512134231-052b8b6c18ed h1:Ab4XhecWusSSeIfQ2eySh7kffQ1Wsv6fNSkwefr6AVQ= +github.com/justinas/alice v0.0.0-20160512134231-052b8b6c18ed/go.mod h1:oLH0CmIaxCGXD67VKGR5AacGXZSMznlmeqM8RzPrcY8= +github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/linksmart/go-sec v1.0.1 h1:UNeRj81/KHCy4hkFcZn7x5N/nM+uNxe+xsLBQzNF7kg= +github.com/linksmart/go-sec v1.0.1/go.mod h1:bTksBzP6fCEwIM43z8m3jSRa4YIAWdUwMBYjcoftm1c= +github.com/linksmart/service-catalog v2.3.4+incompatible h1:86kWM0OTTsMWvo7admKYJ+t2aWIrnD/X5pNgim5hpcU= +github.com/linksmart/service-catalog v2.3.4+incompatible/go.mod h1:QbMEQV7MkcbUCMlGhd/BRNDoOg23VP3/RGBrmSpnL2I= +github.com/linksmart/service-catalog/v2 v2.4.1 h1:/gtc/9AvLGbabYc0wsfGRhqpS2s68v9wH7WTQbGLG+c= +github.com/linksmart/service-catalog/v2 v2.4.1/go.mod h1:aIfziWy3rusDlaoEIeoML6XV68RVFdNm+i58X9inXcY= +github.com/miekg/dns v0.0.0-20170818131442-e4205768578d h1:M1nGptTlM6l6aT5MUYA5XExwSjnIPHA+xEOWobbMU6g= +github.com/miekg/dns v0.0.0-20170818131442-e4205768578d/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/oleksandr/bonjour v0.0.0-20160508152359-5dcf00d8b228 h1:Cvfd2dOlXIPTeEkOT/h8PyK4phBngOM4at9/jlgy7d4= +github.com/oleksandr/bonjour v0.0.0-20160508152359-5dcf00d8b228/go.mod h1:MGuVJ1+5TX1SCoO2Sx0eAnjpdRytYla2uC1YIZfkC9c= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/satori/go.uuid v1.1.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/main.go b/main.go index a2b93799..989e3cdb 100644 --- a/main.go +++ b/main.go @@ -14,12 +14,12 @@ import ( "syscall" "time" - _ "code.linksmart.eu/com/go-sec/auth/keycloak/obtainer" - _ "code.linksmart.eu/com/go-sec/auth/keycloak/validator" - "code.linksmart.eu/com/go-sec/auth/obtainer" - "code.linksmart.eu/com/go-sec/auth/validator" - catalog "code.linksmart.eu/rc/resource-catalog/catalog" - sc "code.linksmart.eu/sc/service-catalog/service" + _ "github.com/linksmart/go-sec/auth/keycloak/obtainer" + _ "github.com/linksmart/go-sec/auth/keycloak/validator" + "github.com/linksmart/go-sec/auth/obtainer" + "github.com/linksmart/go-sec/auth/validator" + catalog "github.com/linksmart/resource-catalog/catalog" + sc "github.com/linksmart/service-catalog/catalog" "github.com/codegangsta/negroni" "github.com/gorilla/context" "github.com/justinas/alice" diff --git a/registration.go b/registration.go index db649f1a..9b243b59 100644 --- a/registration.go +++ b/registration.go @@ -8,8 +8,8 @@ import ( "net/url" 
"strings" - catalog "code.linksmart.eu/rc/resource-catalog/catalog" - sc "code.linksmart.eu/sc/service-catalog/service" + catalog "github.com/linksmart/resource-catalog/catalog" + sc "github.com/linksmart/service-catalog/service" ) const ( diff --git a/vendor/code.linksmart.eu/com/go-sec/.gitignore b/vendor/code.linksmart.eu/com/go-sec/.gitignore deleted file mode 100644 index 2a0f191b..00000000 --- a/vendor/code.linksmart.eu/com/go-sec/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# gb build artefacts -bin/ -pkg/ diff --git a/vendor/code.linksmart.eu/com/go-sec/Gopkg.lock b/vendor/code.linksmart.eu/com/go-sec/Gopkg.lock deleted file mode 100644 index 0049b53a..00000000 --- a/vendor/code.linksmart.eu/com/go-sec/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" - version = "v3.0.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "bd8d3fa140c5adaee5d3b465c78a1170b2a167f1f570ca398d3b36ea7174c004" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/code.linksmart.eu/com/go-sec/Gopkg.toml b/vendor/code.linksmart.eu/com/go-sec/Gopkg.toml deleted file mode 100644 index d0a79e19..00000000 --- a/vendor/code.linksmart.eu/com/go-sec/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - name = "github.com/dgrijalva/jwt-go" - version = "3.0.0" diff --git a/vendor/code.linksmart.eu/com/go-sec/README.md b/vendor/code.linksmart.eu/com/go-sec/README.md deleted file mode 100644 index 210a35c5..00000000 --- a/vendor/code.linksmart.eu/com/go-sec/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# LinkSmart Go-Sec -Linksmart security packages for Go (Golang) services. - -For more information, refer to docs: -* [Authentication](https://docs.linksmart.eu/display/LC/Authentication) -* [Authorization](https://docs.linksmart.eu/display/LC/Authorization) \ No newline at end of file diff --git a/vendor/code.linksmart.eu/com/go-sec/auth/README.md b/vendor/code.linksmart.eu/com/go-sec/auth/README.md deleted file mode 100644 index dc899eca..00000000 --- a/vendor/code.linksmart.eu/com/go-sec/auth/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# LinkSmart® LocalConnect Authentication -The package `linksmart.eu/lc/sec/auth` provides interfaces to obtain and validate service tickets. 
- -* Overview [Authentication](https://linksmart.eu/redmine/projects/linksmart-local-connect/wiki/Authentication) -* How to use [Authentication Package](https://linksmart.eu/redmine/projects/linksmart-local-connect/wiki/Authentication_Package) -* Optional [Authorization](https://linksmart.eu/redmine/projects/linksmart-local-connect/wiki/Authorization) \ No newline at end of file diff --git a/vendor/code.linksmart.eu/com/go-sec/authz/README.md b/vendor/code.linksmart.eu/com/go-sec/authz/README.md deleted file mode 100644 index 29ac34a5..00000000 --- a/vendor/code.linksmart.eu/com/go-sec/authz/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# LinkSmart® LocalConnect Authorization -The package `linksmart.eu/lc/sec/authz` provides a simple rule-based authorization that can be used to implement access control in services. This package adds authorization abilities to [LocalConnect Authentication](https://linksmart.eu/redmine/projects/linksmart-local-connect/wiki/Authentication). - -* Overview [Authorization](https://linksmart.eu/redmine/projects/linksmart-local-connect/wiki/Authorization) \ No newline at end of file diff --git a/vendor/code.linksmart.eu/sc/service-catalog/.gitignore b/vendor/code.linksmart.eu/sc/service-catalog/.gitignore deleted file mode 100644 index 2a0f191b..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# gb build artefacts -bin/ -pkg/ diff --git a/vendor/code.linksmart.eu/sc/service-catalog/Dockerfile b/vendor/code.linksmart.eu/sc/service-catalog/Dockerfile deleted file mode 100644 index 0f618435..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM golang:1.8-alpine - -# copy default config file and code -COPY sample_conf/* /conf/ -COPY . /home/src/code.linksmart.eu/sc/service-catalog - -# build the code -ENV GOPATH /home -RUN go install code.linksmart.eu/sc/service-catalog - -WORKDIR /home - -VOLUME /conf /data -EXPOSE 8082 - -ENTRYPOINT ["./bin/service-catalog"] -CMD ["-conf", "/conf/docker.json"] diff --git a/vendor/code.linksmart.eu/sc/service-catalog/Gopkg.lock b/vendor/code.linksmart.eu/sc/service-catalog/Gopkg.lock deleted file mode 100644 index 8151f50d..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/Gopkg.lock +++ /dev/null @@ -1,87 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - name = "code.linksmart.eu/com/go-sec" - packages = ["auth/keycloak/obtainer","auth/keycloak/validator","auth/obtainer","auth/validator","authz"] - revision = "5abeee54723444678d878c5bb556e5c4218f69ce" - -[[projects]] - branch = "master" - name = "github.com/ancientlore/go-avltree" - packages = ["."] - revision = "4ba4b949e04ae520ba87de82bb82d8f3a6be7e11" - -[[projects]] - name = "github.com/codegangsta/negroni" - packages = ["."] - revision = "fde5e16d32adc7ad637e9cd9ad21d4ebc6192535" - version = "v0.2.0" - -[[projects]] - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" - version = "v3.0.0" - -[[projects]] - branch = "master" - name = "github.com/golang/snappy" - packages = ["."] - revision = "553a641470496b2327abcac10b36396bd98e45c9" - -[[projects]] - name = "github.com/gorilla/context" - packages = ["."] - revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" - version = "v1.1" - -[[projects]] - name = "github.com/gorilla/mux" - packages = ["."] - revision = "bcd8bc72b08df0f70df986b97f95590779502d31" - version = "v1.4.0" - -[[projects]] - name = "github.com/justinas/alice" - packages = ["."] - revision = "052b8b6c18edb9db317af86806d8e00ebaa94160" - version = "1.0.0" - -[[projects]] - branch = "master" - name = "github.com/miekg/dns" - packages = ["."] - revision = "e4205768578dc90c2669e75a2f8a8bf77e3083a4" - -[[projects]] - branch = "master" - name = "github.com/oleksandr/bonjour" - packages = ["."] - revision = "5dcf00d8b228be86307f952f550f2191d956b9e2" - -[[projects]] - name = "github.com/pborman/uuid" - packages = ["."] - revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" - version = "v1.1" - -[[projects]] - branch = "master" - name = "github.com/syndtr/goleveldb" - packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"] - revision = "b89cc31ef7977104127d34c1bd31ebd1a9db2199" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"] - revision = "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "c1cb082c037aef0889c23ef4ac1f916437b5aacba72b77cf331f35396dc45d04" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/code.linksmart.eu/sc/service-catalog/Gopkg.toml b/vendor/code.linksmart.eu/sc/service-catalog/Gopkg.toml deleted file mode 100644 index 6ca36632..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/Gopkg.toml +++ /dev/null @@ -1,58 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "code.linksmart.eu/com/go-sec" - -[[constraint]] - branch = "master" - name = "github.com/ancientlore/go-avltree" - -[[constraint]] - name = "github.com/codegangsta/negroni" - version = "0.2.0" - -[[constraint]] - name = "github.com/gorilla/context" - version = "1.1.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "1.4.0" - -[[constraint]] - name = "github.com/justinas/alice" - version = "1.0.0" - -[[constraint]] - branch = "master" - name = "github.com/oleksandr/bonjour" - -[[constraint]] - name = "github.com/pborman/uuid" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/syndtr/goleveldb" diff --git a/vendor/code.linksmart.eu/sc/service-catalog/README.md b/vendor/code.linksmart.eu/sc/service-catalog/README.md deleted file mode 100644 index 61eb0d5b..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# LinkSmart Service Catalog - -## Compile from source -``` -git clone https://code.linksmart.eu/scm/sc/service-catalog.git src/code.linksmart.eu/sc/service-catalog -export GOPATH=`pwd` -go install code.linksmart.eu/sc/service-catalog -``` - -## Docker -The following command runs service catalog with the default configurations: -``` -docker run -p 8082:8082 docker.linksmart.eu/sc/service-catalog -``` - -## Development -The dependencies of this package are managed by [dep](https://github.com/golang/dep). 
\ No newline at end of file diff --git a/vendor/code.linksmart.eu/sc/service-catalog/apidoc/swagger.json b/vendor/code.linksmart.eu/sc/service-catalog/apidoc/swagger.json deleted file mode 100644 index d477dee8..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/apidoc/swagger.json +++ /dev/null @@ -1,493 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "version": "1.0.0", - "title": "LinkSmart Service Catalog API", - "description": "API documetnation of the LinkSmart® Service Catalog\n\nFor more information, visit the [Wiki](https://linksmart.eu/redmine/projects/linksmart-local-connect/wiki/Service_Catalog_API).\n" - }, - "basePath": "/sc", - "produces": [ - "application/ld+json" - ], - "tags": [ - { - "name": "sc", - "description": "Service Catalog" - } - ], - "definitions": { - "WritableService": { - "title": "Service", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "meta": { - "type": "object" - }, - "representation": { - "type": "object" - }, - "protocols": { - "type": "array", - "items": { - "type": "object", - "properties": { - "type": { - "type": "string" - }, - "endpoint": { - "type": "object" - }, - "methods": { - "type": "array", - "items": { - "type": "string" - } - }, - "content-types": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "ttl": { - "type": "integer" - } - } - }, - "ReadableService": { - "title": "Service", - "type": "object", - "required": [ - "protocols" - ], - "properties": { - "id": { - "type": "string" - }, - "@context": { - "type": "string", - "format": "url" - }, - "type": { - "type": "string" - }, - "url": { - "type": "string" - }, - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "meta": { - "type": "object" - }, - "representation": { - "type": "object" - }, - "protocols": { - "type": "array", - "items": { - "type": "object", - "properties": { - "type": { - "type": "string" - }, - "endpoint": { - "type": "object" - }, - "methods": { - "type": "array", - "items": { - "type": "string" - } - }, - "content-types": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "ttl": { - "type": "integer" - }, - "created": { - "type": "string", - "format": "date-time" - }, - "updated": { - "type": "string", - "format": "date-time" - }, - "expires": { - "type": "string", - "format": "date-time" - } - } - }, - "APIIndex": { - "type": "object", - "properties": { - "@context": { - "type": "string", - "format": "url" - }, - "id": { - "type": "string" - }, - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "api_version": { - "type": "string" - }, - "services": { - "type": "array", - "items": { - "$ref": "#/definitions/ReadableService" - } - }, - "page": { - "type": "integer" - }, - "per_page": { - "type": "integer" - }, - "total": { - "type": "integer" - } - } - }, - "ErrorResponse": { - "type": "object", - "properties": { - "code": { - "type": "integer" - }, - "message": { - "type": "string" - } - } - } - }, - "responses": { - "RespBadRequest": { - "description": "Bad Request", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - }, - "RespUnauthorized": { - "description": "Unauthorized", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - }, - "RespForbidden": { - "description": "Forbidden", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - }, - "RespNotfound": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - }, - 
"RespConflict": { - "description": "Conflict", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - }, - "RespInternalServerError": { - "description": "Internal Server Error", - "schema": { - "$ref": "#/definitions/ErrorResponse" - } - } - }, - "parameters": { - "ParamPage": { - "name": "page", - "in": "query", - "description": "Page number in the pagination", - "required": false, - "type": "number", - "format": "integer" - }, - "ParamPerPage": { - "name": "per_page", - "in": "query", - "description": "Number of entries per page", - "required": false, - "type": "number", - "format": "integer" - } - }, - "paths": { - "/": { - "get": { - "tags": [ - "sc" - ], - "summary": "Retrieves API index.", - "parameters": [ - { - "$ref": "#/parameters/ParamPage" - }, - { - "$ref": "#/parameters/ParamPerPage" - } - ], - "responses": { - "200": { - "description": "Successful response", - "schema": { - "$ref": "#/definitions/APIIndex" - } - }, - "401": { - "$ref": "#/responses/RespUnauthorized" - }, - "403": { - "$ref": "#/responses/RespForbidden" - }, - "500": { - "$ref": "#/responses/RespInternalServerError" - } - } - }, - "post": { - "tags": [ - "sc" - ], - "summary": "Creates new `Service` object", - "parameters": [ - { - "name": "service", - "description": "Service to be created", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/WritableService" - } - } - ], - "responses": { - "201": { - "description": "Created successfully", - "headers": { - "Location": { - "description": "URL of the newly created Service", - "type": "string" - } - } - }, - "400": { - "$ref": "#/responses/RespBadRequest" - }, - "401": { - "$ref": "#/responses/RespUnauthorized" - }, - "403": { - "$ref": "#/responses/RespForbidden" - }, - "500": { - "$ref": "#/responses/RespInternalServerError" - } - } - } - }, - "/{id}": { - "get": { - "tags": [ - "sc" - ], - "summary": "Retrieves a `Service` object", - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of the `Service`", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "Successful response", - "schema": { - "$ref": "#/definitions/ReadableService" - } - }, - "400": { - "$ref": "#/responses/RespBadRequest" - }, - "401": { - "$ref": "#/responses/RespUnauthorized" - }, - "403": { - "$ref": "#/responses/RespForbidden" - }, - "404": { - "$ref": "#/responses/RespNotfound" - }, - "500": { - "$ref": "#/responses/RespInternalServerError" - } - } - }, - "put": { - "tags": [ - "sc" - ], - "summary": "Updates the existing `Service` or creates a new one (with the provided ID)", - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of the `Service`", - "required": true, - "type": "string" - }, - { - "name": "service", - "description": "Service to be created", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/WritableService" - } - } - ], - "responses": { - "200": { - "description": "Service updated successfully" - }, - "201": { - "description": "A new service is created" - }, - "400": { - "$ref": "#/responses/RespBadRequest" - }, - "401": { - "$ref": "#/responses/RespUnauthorized" - }, - "403": { - "$ref": "#/responses/RespForbidden" - }, - "409": { - "$ref": "#/responses/RespConflict" - }, - "500": { - "$ref": "#/responses/RespInternalServerError" - } - } - }, - "delete": { - "tags": [ - "sc" - ], - "summary": "Deletes the `Service`", - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of the `Service`", - "required": true, - 
"type": "string" - } - ], - "responses": { - "200": { - "description": "Successful response" - }, - "401": { - "$ref": "#/responses/RespUnauthorized" - }, - "403": { - "$ref": "#/responses/RespForbidden" - }, - "404": { - "$ref": "#/responses/RespNotfound" - }, - "500": { - "$ref": "#/responses/RespInternalServerError" - } - } - } - }, - "/{path}/{op}/{value}": { - "get": { - "tags": [ - "sc" - ], - "summary": "Service filtering API", - "parameters": [ - { - "name": "path", - "in": "path", - "description": "Dot-separated path in the registration JSON", - "required": true, - "type": "string" - }, - { - "name": "op", - "in": "path", - "description": "One of (equals, prefix, suffix, contains) string comparison operations", - "required": true, - "type": "string" - }, - { - "name": "value", - "in": "path", - "description": "The intended value/prefix/suffix/substring of the key identified by the path", - "required": true, - "type": "string" - }, - { - "$ref": "#/parameters/ParamPage" - }, - { - "$ref": "#/parameters/ParamPerPage" - } - ], - "responses": { - "200": { - "description": "Succcessful response", - "schema": { - "$ref": "#/definitions/APIIndex" - } - }, - "401": { - "$ref": "#/responses/RespUnauthorized" - }, - "403": { - "$ref": "#/responses/RespForbidden" - }, - "500": { - "$ref": "#/responses/RespInternalServerError" - } - } - } - } - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/config.go b/vendor/code.linksmart.eu/sc/service-catalog/config.go deleted file mode 100644 index 96290957..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/config.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/url" - "strings" - - "code.linksmart.eu/com/go-sec/authz" - "code.linksmart.eu/sc/service-catalog/service" -) - -type Config struct { - Description string `json:"description"` - DnssdEnabled bool `json:"dnssdEnabled"` - BindAddr string `json:"bindAddr"` - BindPort int `json:"bindPort"` - ApiLocation string `json:"apiLocation"` - StaticDir string `json:"staticDir"` - Storage StorageConfig `json:"storage"` - GC GCConfig `js:"gc"` - Auth ValidatorConf `json:"auth"` -} - -type StorageConfig struct { - Type string `json:"type"` - DSN string `json:"dsn"` -} - -var supportedBackends = map[string]bool{ - service.CatalogBackendMemory: true, - service.CatalogBackendLevelDB: true, -} - -// GCConfig describes configuration of the GlobalConnect -type GCConfig struct { - // URL of the Tunneling Service endpoint (aka NM REST API) - TunnelingService string `json:"tunnelingService"` -} - -func (c *Config) Validate() error { - var err error - if c.BindAddr == "" || c.BindPort == 0 { - err = fmt.Errorf("Empty host or port") - } - if !supportedBackends[c.Storage.Type] { - err = fmt.Errorf("Unsupported storage backend") - } - _, err = url.Parse(c.Storage.DSN) - if err != nil { - err = fmt.Errorf("storage DSN should be a valid URL") - } - if c.StaticDir == "" { - err = fmt.Errorf("staticDir must be defined") - } - if strings.HasSuffix(c.StaticDir, "/") { - err = fmt.Errorf("staticDir must not have a trailing slash") - } - if c.GC.TunnelingService != "" { - _, err := url.Parse(c.GC.TunnelingService) - if err != nil { - err = fmt.Errorf("gc tunnelingService must be a valid URL") - } - } - if c.Auth.Enabled { - // Validate ticket validator config - err = c.Auth.Validate() - if err != nil { - return err - } - } - - return err -} - -func 
loadConfig(confPath string) (*Config, error) { - file, err := ioutil.ReadFile(confPath) - if err != nil { - return nil, err - } - - config := new(Config) - err = json.Unmarshal(file, config) - if err != nil { - return nil, err - } - - if strings.HasSuffix(config.ApiLocation, "/") { - config.ApiLocation = strings.TrimSuffix(config.ApiLocation, "/") - } - - if err = config.Validate(); err != nil { - return nil, err - } - return config, nil -} - -// Ticket Validator Config -type ValidatorConf struct { - // Auth switch - Enabled bool `json:"enabled"` - // Authentication provider name - Provider string `json:"provider"` - // Authentication provider URL - ProviderURL string `json:"providerURL"` - // Service ID - ServiceID string `json:"serviceID"` - // Basic Authentication switch - BasicEnabled bool `json:"basicEnabled"` - // Authorization config - Authz *authz.Conf `json:"authorization"` -} - -func (c ValidatorConf) Validate() error { - - // Validate Provider - if c.Provider == "" { - return errors.New("Ticket Validator: Auth provider name (provider) is not specified.") - } - - // Validate ProviderURL - if c.ProviderURL == "" { - return errors.New("Ticket Validator: Auth provider URL (providerURL) is not specified.") - } - _, err := url.Parse(c.ProviderURL) - if err != nil { - return errors.New("Ticket Validator: Auth provider URL (providerURL) is invalid: " + err.Error()) - } - - // Validate ServiceID - if c.ServiceID == "" { - return errors.New("Ticket Validator: Auth Service ID (serviceID) is not specified.") - } - - // Validate Authorization - if c.Authz != nil { - if err := c.Authz.Validate(); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/discovery/init.go b/vendor/code.linksmart.eu/sc/service-catalog/discovery/init.go deleted file mode 100644 index 679ee226..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/discovery/init.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package discovery - -import ( - "log" - "os" - "strconv" -) - -var logger *log.Logger - -func init() { - logger = log.New(os.Stdout, "[discovery] ", 0) - - v, err := strconv.Atoi(os.Getenv("DEBUG")) - if err == nil && v == 1 { - logger.SetFlags(log.Ltime | log.Lshortfile) - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/init.go b/vendor/code.linksmart.eu/sc/service-catalog/init.go deleted file mode 100644 index b95e2245..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/init.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package main - -import ( - "log" - "os" - "strconv" -) - -var logger *log.Logger - -func init() { - logger = log.New(os.Stdout, "[main] ", 0) - - v, err := strconv.Atoi(os.Getenv("DEBUG")) - if err == nil && v == 1 { - logger.SetFlags(log.Ltime | log.Lshortfile) - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/main.go b/vendor/code.linksmart.eu/sc/service-catalog/main.go deleted file mode 100644 index 0e969f05..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/main.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package main - -import ( - "flag" - "fmt" - "mime" - "net/http" - "net/url" - "os" - "os/signal" - "strconv" - "syscall" - "time" - - _ "code.linksmart.eu/com/go-sec/auth/keycloak/validator" - "code.linksmart.eu/com/go-sec/auth/validator" - 
"code.linksmart.eu/sc/service-catalog/service" - "github.com/codegangsta/negroni" - "github.com/gorilla/context" - "github.com/justinas/alice" - "github.com/oleksandr/bonjour" -) - -var ( - confPath = flag.String("conf", "conf/service-catalog.json", "Service catalog configuration file path") -) - -func main() { - flag.Parse() - - config, err := loadConfig(*confPath) - if err != nil { - logger.Fatalf("Error reading config file %v: %v", *confPath, err) - } - - r, shutdownAPI, err := setupRouter(config) - if err != nil { - logger.Fatal(err.Error()) - } - - // Announce service using DNS-SD - var bonjourS *bonjour.Server - if config.DnssdEnabled { - bonjourS, err = bonjour.Register(config.Description, - service.DNSSDServiceType, - "", - config.BindPort, - []string{fmt.Sprintf("uri=%s", config.ApiLocation)}, - nil) - if err != nil { - logger.Printf("Failed to register DNS-SD service: %s", err.Error()) - } else { - logger.Println("Registered service via DNS-SD using type", service.DNSSDServiceType) - } - } - - // Setup signal catcher for the server's proper shutdown - c := make(chan os.Signal, 1) - signal.Notify(c, - syscall.SIGHUP, - syscall.SIGINT, - syscall.SIGTERM, - syscall.SIGQUIT) - go func() { - for _ = range c { - // sig is a ^C, handle it - - //TODO: put here the last will logic - - // Stop bonjour registration - if bonjourS != nil { - bonjourS.Shutdown() - time.Sleep(1e9) - } - - // Shutdown catalog API - err := shutdownAPI() - if err != nil { - logger.Println(err.Error()) - } - - logger.Println("Stopped") - os.Exit(0) - } - }() - - err = mime.AddExtensionType(".jsonld", "application/ld+json") - if err != nil { - logger.Println("ERROR: ", err.Error()) - } - - // Configure the middleware - n := negroni.New( - negroni.NewRecovery(), - negroni.NewLogger(), - &negroni.Static{ - Dir: http.Dir(config.StaticDir), - Prefix: service.StaticLocation, - IndexFile: "index.html", - }, - ) - // Mount router - n.UseHandler(r) - - // Start listener - endpoint := fmt.Sprintf("%s:%s", config.BindAddr, strconv.Itoa(config.BindPort)) - logger.Printf("Starting standalone Service Catalog at %v%v", endpoint, config.ApiLocation) - n.Run(endpoint) -} - -func setupRouter(config *Config) (*router, func() error, error) { - var listeners []service.Listener - // GC publisher if configured - if config.GC.TunnelingService != "" { - endpoint, _ := url.Parse(config.GC.TunnelingService) - listeners = append(listeners, service.NewGCPublisher(*endpoint)) - } - - // Setup API storage - var ( - storage service.CatalogStorage - err error - ) - switch config.Storage.Type { - case service.CatalogBackendMemory: - storage = service.NewMemoryStorage() - case service.CatalogBackendLevelDB: - storage, err = service.NewLevelDBStorage(config.Storage.DSN, nil) - if err != nil { - return nil, nil, fmt.Errorf("Failed to start LevelDB storage: %v", err.Error()) - } - default: - return nil, nil, fmt.Errorf("Could not create catalog API storage. Unsupported type: %v", config.Storage.Type) - } - - controller, err := service.NewController(storage, config.ApiLocation, listeners...) 
- if err != nil { - storage.Close() - return nil, nil, fmt.Errorf("Failed to start the controller: %v", err.Error()) - } - - // Create catalog API object - api := service.NewCatalogAPI( - controller, - config.ApiLocation, - service.StaticLocation, - config.Description, - ) - - commonHandlers := alice.New( - context.ClearHandler, - ) - - // Append auth handler if enabled - if config.Auth.Enabled { - // Setup ticket validator - v, err := validator.Setup( - config.Auth.Provider, - config.Auth.ProviderURL, - config.Auth.ServiceID, - config.Auth.BasicEnabled, - config.Auth.Authz) - if err != nil { - return nil, nil, err - } - - commonHandlers = commonHandlers.Append(v.Handler) - } - - // Configure http api router - r := newRouter() - // Handlers - r.get(config.ApiLocation, commonHandlers.ThenFunc(api.List)) - r.post(config.ApiLocation, commonHandlers.ThenFunc(api.Post)) - // Accept an id with zero or one slash: [^/]+/?[^/]* - // -> [^/]+ one or more of anything but slashes /? optional slash [^/]* zero or more of anything but slashes - r.get(config.ApiLocation+"/{id:[^/]+/?[^/]*}", commonHandlers.ThenFunc(api.Get)) - r.put(config.ApiLocation+"/{id:[^/]+/?[^/]*}", commonHandlers.ThenFunc(api.Put)) - r.delete(config.ApiLocation+"/{id:[^/]+/?[^/]*}", commonHandlers.ThenFunc(api.Delete)) - r.get(config.ApiLocation+"/{path}/{op}/{value:.*}", commonHandlers.ThenFunc(api.Filter)) - - return r, controller.Stop, nil -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/router.go b/vendor/code.linksmart.eu/sc/service-catalog/router.go deleted file mode 100644 index a9ed6396..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/router.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Fraunhofer Institute for Applied Information Technology FIT - -package main - -import ( - "fmt" - "net/http" - - "github.com/gorilla/mux" -) - -type router struct { - *mux.Router -} - -func newRouter() *router { - return &router{mux.NewRouter().StrictSlash(false).SkipClean(true)} -} - -func (r *router) get(path string, handler http.Handler) { - r.Methods("GET").Path(path).Handler(handler) - r.Methods("GET").Path(fmt.Sprintf("%s/", path)).Handler(handler) -} - -func (r *router) post(path string, handler http.Handler) { - r.Methods("POST").Path(path).Handler(handler) - r.Methods("POST").Path(fmt.Sprintf("%s/", path)).Handler(handler) -} - -func (r *router) put(path string, handler http.Handler) { - r.Methods("PUT").Path(path).Handler(handler) - r.Methods("PUT").Path(fmt.Sprintf("%s/", path)).Handler(handler) -} - -func (r *router) delete(path string, handler http.Handler) { - r.Methods("DELETE").Path(path).Handler(handler) - r.Methods("DELETE").Path(fmt.Sprintf("%s/", path)).Handler(handler) -} - -func (r *router) patch(path string, handler http.Handler) { - r.Methods("PATCH").Path(path).Handler(handler) - r.Methods("PATCH").Path(fmt.Sprintf("%s/", path)).Handler(handler) -} - -func (r *router) head(path string, handler http.Handler) { - r.Methods("HEAD").Path(path).Handler(handler) - r.Methods("HEAD").Path(fmt.Sprintf("%s/", path)).Handler(handler) -} - -func (r *router) options(path string, handler http.Handler) { - r.Methods("OPTIONS").Path(path).Handler(handler) - r.Methods("OPTIONS").Path(fmt.Sprintf("%s/", path)).Handler(handler) -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/docker.json b/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/docker.json deleted file mode 100644 index 223d07e1..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/docker.json +++ 
/dev/null @@ -1,12 +0,0 @@ -{ - "description": "Standalone Service Catalog", - "dnssdEnabled": true, - "bindAddr": "0.0.0.0", - "bindPort": 8082, - "apiLocation": "/", - "staticDir": "./static", - "storage": { - "type": "leveldb", - "dsn": "/data" - } -} \ No newline at end of file diff --git a/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog-auth.json b/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog-auth.json deleted file mode 100644 index 02448802..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog-auth.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "description": "Standalone Service Catalog", - "dnssdEnabled": true, - "bindAddr": "0.0.0.0", - "bindPort": 8082, - "apiLocation": "/sc", - "staticDir": "./static", - "storage": { - "type": "memory" - }, - "gc": { - "tunnelingService": "http://localhost:8082/NetworkManager" - }, - "auth": { - "enabled": true, - "provider": "provider-name", - "providerURL": "https://provider-url", - "serviceID": "sampleSC", - "basicEnabled": false, - "authorization": { - "rules": [ - { - "resources": ["/sc"], - "methods": ["GET","POST", "PUT", "DELETE"], - "users": ["admin"], - "groups": [] - }, - { - "resources": ["/sc"], - "methods": ["GET"], - "users": [], - "groups": ["fit"] - } - ] - } - } -} \ No newline at end of file diff --git a/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog.json b/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog.json deleted file mode 100644 index 2aa18120..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/sample_conf/service-catalog.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "description": "Standalone Service Catalog", - "dnssdEnabled": true, - "bindAddr": "0.0.0.0", - "bindPort": 8082, - "apiLocation": "/sc", - "staticDir": "./static", - "storage": { - "type": "memory" - }, - "gc": { - "tunnelingService": "http://localhost:8082/NetworkManager" - } -} \ No newline at end of file diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/catalog.go b/vendor/code.linksmart.eu/sc/service-catalog/service/catalog.go deleted file mode 100644 index e5b524f5..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/catalog.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "fmt" - "time" -) - -// Structs - -// Service is a service entry in the catalog -type Service struct { - Id string `json:"id"` - URL string `json:"url"` - Type string `json:"type"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Meta map[string]interface{} `json:"meta,omitempty"` - Protocols []Protocol `json:"protocols"` - Representation map[string]interface{} `json:"representation,omitempty"` - Ttl int `json:"ttl,omitempty"` - Created time.Time `json:"created"` - Updated time.Time `json:"updated"` - Expires *time.Time `json:"expires,omitempty"` -} - -// Validates the Service configuration -func (s *Service) validate() error { - - // Validate protocols - if len(s.Protocols) == 0 { - return fmt.Errorf("At least one protocol must be defined") - } - for _, protocol := range s.Protocols { - if protocol.Type == "" { - return fmt.Errorf("Each protocol must have a type") - } - if len(protocol.Endpoint) == 0 { - return fmt.Errorf("Each protocol must have at least one endpoint") - } - } - - return nil -} - -// Checks whether the service can be tunneled in GC -func (s *Service) 
isGCTunnelable() bool { - // Until the service discovery in GC is not working properly, - // we can only tunnel services that never expire (tll == 0) - if s.Ttl != 0 { - return false - } - - v, ok := s.Meta[MetaKeyGCExpose] - if !ok { - return false - } - - // flag should be bool - e := v.(bool) - if e == true { - return true - } - return false -} - -// Protocol describes the service API -type Protocol struct { - Type string `json:"type"` - Endpoint map[string]interface{} `json:"endpoint"` - Methods []string `json:"methods,omitempty"` - ContentTypes []string `json:"content-types,omitempty"` -} - -// Interfaces - -// Controller interface -type CatalogController interface { - add(s Service) (string, error) - get(id string) (*Service, error) - update(id string, s Service) error - delete(id string) error - list(page, perPage int) ([]Service, int, error) - filter(path, op, value string, page, perPage int) ([]Service, int, error) - total() (int, error) - cleanExpired() - - Stop() error -} - -// Storage interface -type CatalogStorage interface { - add(s *Service) error - get(id string) (*Service, error) - update(id string, s *Service) error - delete(id string) error - list(page, perPage int) ([]Service, int, error) - total() (int, error) - Close() error -} - -// Listener interface can be used for notification of the catalog updates -// NOTE: Implementations are expected to be thread safe -type Listener interface { - added(s Service) - updated(s Service) - deleted(id string) -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi.go b/vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi.go deleted file mode 100644 index 4510fb23..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "path/filepath" - "strings" - - "code.linksmart.eu/sc/service-catalog/utils" - "github.com/gorilla/mux" -) - -const ( - CtxPath = "/ctx/sc.jsonld" -) - -type Collection struct { - Context string `json:"@context,omitempty"` - Id string `json:"id"` - Type string `json:"type"` - Description string `json:"description"` - Services []Service `json:"services"` - Page int `json:"page"` - PerPage int `json:"per_page"` - Total int `json:"total"` -} - -type JSONLDService struct { - Context string `json:"@context"` - *Service -} - -// Read-only catalog api -type CatalogAPI struct { - controller CatalogController - apiLocation string - ctxPath string - description string -} - -func NewCatalogAPI(controller CatalogController, apiLocation, staticLocation, description string) *CatalogAPI { - return &CatalogAPI{ - controller: controller, - apiLocation: apiLocation, - ctxPath: staticLocation + CtxPath, - description: description, - } -} - -// API Index: Lists services -func (a *CatalogAPI) List(w http.ResponseWriter, req *http.Request) { - err := req.ParseForm() - if err != nil { - ErrorResponse(w, http.StatusBadRequest, "Error parsing the query:", err.Error()) - return - } - page, perPage, err := utils.ParsePagingParams( - req.Form.Get(utils.GetParamPage), req.Form.Get(utils.GetParamPerPage), MaxPerPage) - if err != nil { - ErrorResponse(w, http.StatusBadRequest, "Error parsing query parameters:", err.Error()) - return - } - - services, total, err := a.controller.list(page, perPage) - if err != nil { - ErrorResponse(w, http.StatusInternalServerError, err.Error()) - return - } - 
- coll := &Collection{ - Context: a.ctxPath, - Id: a.apiLocation, - Type: ApiCollectionType, - Description: a.description, - Services: services, - Page: page, - PerPage: perPage, - Total: total, - } - - b, err := json.Marshal(coll) - if err != nil { - ErrorResponse(w, http.StatusInternalServerError, err.Error()) - return - } - - w.Header().Set("Content-Type", "application/ld+json;version="+ApiVersion) - w.Write(b) -} - -// Filters services -func (a *CatalogAPI) Filter(w http.ResponseWriter, req *http.Request) { - params := mux.Vars(req) - path := params["path"] - op := params["op"] - value := params["value"] - - err := req.ParseForm() - if err != nil { - ErrorResponse(w, http.StatusBadRequest, "Error parsing the query:", err.Error()) - return - } - page, perPage, err := utils.ParsePagingParams( - req.Form.Get(utils.GetParamPage), req.Form.Get(utils.GetParamPerPage), MaxPerPage) - if err != nil { - ErrorResponse(w, http.StatusBadRequest, "Error parsing query parameters:", err.Error()) - return - } - - services, total, err := a.controller.filter(path, op, value, page, perPage) - if err != nil { - ErrorResponse(w, http.StatusInternalServerError, err.Error()) - return - } - - coll := &Collection{ - Context: a.ctxPath, - Id: a.apiLocation, - Type: ApiCollectionType, - Description: a.description, - Services: services, - Page: page, - PerPage: perPage, - Total: total, - } - - b, err := json.Marshal(coll) - if err != nil { - ErrorResponse(w, http.StatusInternalServerError, err.Error()) - return - } - w.Header().Set("Content-Type", "application/ld+json;version="+ApiVersion) - w.Write(b) -} - -// Retrieves a service -func (a *CatalogAPI) Get(w http.ResponseWriter, req *http.Request) { - params := mux.Vars(req) - - s, err := a.controller.get(params["id"]) - if err != nil { - switch err.(type) { - case *NotFoundError: - ErrorResponse(w, http.StatusNotFound, err.Error()) - return - default: - ErrorResponse(w, http.StatusInternalServerError, "Error retrieving the service:", err.Error()) - return - } - } - - lds := JSONLDService{ - Context: a.ctxPath, - Service: s, - } - - b, err := json.Marshal(lds) - if err != nil { - ErrorResponse(w, http.StatusInternalServerError, err.Error()) - return - } - - w.Header().Set("Content-Type", "application/ld+json;version="+ApiVersion) - w.Write(b) -} - -// Adds a service -func (a *CatalogAPI) Post(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) - if err != nil { - ErrorResponse(w, http.StatusBadRequest, err.Error()) - return - } - req.Body.Close() - - var s Service - if err := json.Unmarshal(body, &s); err != nil { - ErrorResponse(w, http.StatusBadRequest, "Error processing the request:", err.Error()) - return - } - - if s.Id != "" { - ErrorResponse(w, http.StatusBadRequest, "Creating a service with defined ID is not possible using a POST request.") - return - } - - id, err := a.controller.add(s) - if err != nil { - switch err.(type) { - case *ConflictError: - ErrorResponse(w, http.StatusConflict, "Error creating the registration:", err.Error()) - return - case *BadRequestError: - ErrorResponse(w, http.StatusBadRequest, "Invalid service registration:", err.Error()) - return - default: - ErrorResponse(w, http.StatusInternalServerError, "Error creating the registration:", err.Error()) - return - } - } - - w.Header().Set("Content-Type", "application/ld+json;version="+ApiVersion) - w.Header().Set("Location", fmt.Sprintf("%s/%s", a.apiLocation, id)) - w.WriteHeader(http.StatusCreated) -} - -// Updates an existing service (Response: 
StatusOK) -// or creates a new one with the given id (Response: StatusCreated) -func (a *CatalogAPI) Put(w http.ResponseWriter, req *http.Request) { - params := mux.Vars(req) - - body, err := ioutil.ReadAll(req.Body) - req.Body.Close() - if err != nil { - ErrorResponse(w, http.StatusBadRequest, err.Error()) - return - } - - var s Service - if err := json.Unmarshal(body, &s); err != nil { - ErrorResponse(w, http.StatusBadRequest, "Error processing the request:", err.Error()) - return - } - - err = a.controller.update(params["id"], s) - if err != nil { - switch err.(type) { - case *NotFoundError: - // Create a new service with the given id - s.Id = params["id"] - id, err := a.controller.add(s) - if err != nil { - ErrorResponse(w, http.StatusInternalServerError, "Error creating the registration:", err.Error()) - return - } - w.Header().Set("Content-Type", "application/ld+json;version="+ApiVersion) - w.Header().Set("Location", fmt.Sprintf("%s/%s", a.apiLocation, id)) - w.WriteHeader(http.StatusCreated) - return - case *ConflictError: - ErrorResponse(w, http.StatusConflict, "Error updating the service:", err.Error()) - return - case *BadRequestError: - ErrorResponse(w, http.StatusBadRequest, "Invalid service registration:", err.Error()) - return - default: - ErrorResponse(w, http.StatusInternalServerError, "Error updating the service:", err.Error()) - return - } - } - - w.Header().Set("Content-Type", "application/ld+json;version="+ApiVersion) - w.WriteHeader(http.StatusOK) -} - -// Deletes a service -func (a *CatalogAPI) Delete(w http.ResponseWriter, req *http.Request) { - params := mux.Vars(req) - - err := a.controller.delete(params["id"]) - if err != nil { - switch err.(type) { - case *NotFoundError: - ErrorResponse(w, http.StatusNotFound, err.Error()) - return - default: - ErrorResponse(w, http.StatusInternalServerError, "Error deleting the service:", err.Error()) - return - } - } - - w.Header().Set("Content-Type", "application/ld+json;version="+ApiVersion) - w.WriteHeader(http.StatusOK) -} - -// Serves static and all /static/ctx files as ld+json -func NewStaticHandler(staticDir string) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - if strings.HasPrefix(req.RequestURI, StaticLocation+"/ctx/") { - w.Header().Set("Content-Type", "application/ld+json") - } - urlParts := strings.Split(req.URL.Path, "/") - http.ServeFile(w, req, filepath.Join(staticDir, strings.Join(urlParts[2:], "/"))) - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi_test.go b/vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi_test.go deleted file mode 100644 index 21f95407..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/catalogapi_test.go +++ /dev/null @@ -1,485 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "os" - "strings" - "testing" - - "github.com/gorilla/mux" - "github.com/pborman/uuid" -) - -func setupRouter() (*mux.Router, func(), error) { - var ( - storage CatalogStorage - err error - tempDir string = fmt.Sprintf("%s/lslc/test-%s.ldb", - strings.Replace(os.TempDir(), "\\", "/", -1), uuid.New()) - ) - switch TestStorageType { - case CatalogBackendMemory: - storage = NewMemoryStorage() - case CatalogBackendLevelDB: - storage, err = NewLevelDBStorage(tempDir, nil) - if err != nil { - return nil, nil, err - } - } - - controller, err := NewController(storage, TestApiLocation) - if 
err != nil { - storage.Close() - return nil, nil, fmt.Errorf("Failed to start the controller: %v", err.Error()) - } - - api := NewCatalogAPI( - controller, - TestApiLocation, - TestStaticLocation, - "Test catalog", - ) - - r := mux.NewRouter().StrictSlash(true) - // CRUD - r.Methods("POST").Path(TestApiLocation + "/").HandlerFunc(api.Post) - r.Methods("GET").Path(TestApiLocation + "/{id:[^/]+/?[^/]*}").HandlerFunc(api.Get) - r.Methods("PUT").Path(TestApiLocation + "/{id:[^/]+/?[^/]*}").HandlerFunc(api.Put) - r.Methods("DELETE").Path(TestApiLocation + "/{id:[^/]+/?[^/]*}").HandlerFunc(api.Delete) - // List, Filter - r.Methods("GET").Path(TestApiLocation).HandlerFunc(api.List) - r.Methods("GET").Path(TestApiLocation + "/{path}/{op}/{value:.*}").HandlerFunc(api.Filter) - - return r, func() { - controller.Stop() - os.RemoveAll(tempDir) // Remove temp files - }, nil -} - -func mockedService(id string) *Service { - return &Service{ - Id: "TestHost/TestService" + id, - Type: ApiRegistrationType, - Name: "TestService" + id, - Meta: map[string]interface{}{"test-id": id}, - Description: "Test Service", - Protocols: []Protocol{Protocol{ - Type: "REST", - Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}, - Methods: []string{"GET"}, - ContentTypes: []string{"application/json"}, - }}, - Representation: map[string]interface{}{"application/json": "{}"}, - Ttl: 30, - } -} - -func sameServices(s1, s2 *Service, checkID bool) bool { - // Compare IDs if specified - if checkID { - if s1.Id != s2.Type { - return false - } - } - - // Compare metadata - for k1, v1 := range s1.Meta { - v2, ok := s2.Meta[k1] - if !ok || v1 != v2 { - return false - } - } - for k2, v2 := range s2.Meta { - v1, ok := s1.Meta[k2] - if !ok || v1 != v2 { - return false - } - } - - // Compare number of protocols - if len(s1.Protocols) != len(s2.Protocols) { - return false - } - - // Compare all other attributes - if s1.Type != s2.Type || s1.Name != s2.Name || s1.Description != s2.Description || s1.Ttl != s2.Ttl { - return false - } - - return true -} - -func TestList(t *testing.T) { - router, shutdown, err := setupRouter() - if err != nil { - t.Fatal(err.Error()) - } - ts := httptest.NewServer(router) - defer ts.Close() - defer shutdown() - - url := ts.URL + TestApiLocation - t.Log("Calling GET", url) - res, err := http.Get(url) - if err != nil { - t.Fatal(err.Error()) - } - - if res.StatusCode != http.StatusOK { - t.Fatalf("Server should return %v, got instead: %v (%s)", http.StatusOK, res.StatusCode, res.Status) - } - - if !strings.HasPrefix(res.Header.Get("Content-Type"), "application/ld+json") { - t.Fatalf("Response should have Content-Type: application/ld+json, got instead %s", res.Header.Get("Content-Type")) - } - - var collection *Collection - decoder := json.NewDecoder(res.Body) - defer res.Body.Close() - - err = decoder.Decode(&collection) - if err != nil { - t.Fatal(err.Error()) - } - - if collection.Total > 0 { - t.Fatal("Server should return empty collection, but got total", collection.Total) - } -} - -func TestCreate(t *testing.T) { - router, shutdown, err := setupRouter() - if err != nil { - t.Fatal(err.Error()) - } - ts := httptest.NewServer(router) - defer ts.Close() - defer shutdown() - - service := mockedService("1") - service.Id = "" - b, _ := json.Marshal(service) - - // Create - url := ts.URL + TestApiLocation + "/" - t.Log("Calling POST", url) - res, err := http.Post(url, "application/ld+json", bytes.NewReader(b)) - if err != nil { - t.Fatal(err.Error()) - } - - if res.StatusCode != 
http.StatusCreated { - t.Fatalf("Server should return %v, got instead: %v (%s)", http.StatusCreated, res.StatusCode, res.Status) - } - - if !strings.HasPrefix(res.Header.Get("Content-Type"), "application/ld+json") { - t.Fatalf("Response should have Content-Type: application/ld+json, got instead %s", res.Header.Get("Content-Type")) - } - - // Check if system-generated id is in response - location, err := res.Location() - if err != nil { - t.Fatal(err.Error()) - } - parts := strings.Split(location.String(), "/") - if !strings.HasPrefix(parts[len(parts)-1], "urn:ls_service:") { - t.Fatalf("System-generated URN doesn't have `urn:ls_service:` as prefix. Getting location: %v\n", location.String()) - } - - // Retrieve whole collection - t.Log("Calling GET", ts.URL+TestApiLocation) - res, err = http.Get(ts.URL + TestApiLocation) - if err != nil { - t.Fatal(err.Error()) - } - - var collection *Collection - decoder := json.NewDecoder(res.Body) - defer res.Body.Close() - - err = decoder.Decode(&collection) - - if err != nil { - t.Fatal(err.Error()) - } - - if collection.Total != 1 { - t.Fatal("Server should return collection with exactly 1 resource, but got total", collection.Total) - } -} - -func TestRetrieve(t *testing.T) { - router, shutdown, err := setupRouter() - if err != nil { - t.Fatal(err.Error()) - } - ts := httptest.NewServer(router) - defer ts.Close() - defer shutdown() - - service := mockedService("1") - b, _ := json.Marshal(service) - - // Create - url := ts.URL + TestApiLocation + "/" + service.Id - t.Log("Calling PUT", url) - res, err := httpPut(url, bytes.NewReader(b)) - if err != nil { - t.Fatal(err.Error()) - } - - // Retrieve: service - t.Log("Calling GET", url) - res, err = http.Get(url) - if err != nil { - t.Fatal(err.Error()) - } - - if res.StatusCode != http.StatusOK { - t.Fatalf("Server should return %v, got instead: %v (%s)", http.StatusOK, res.StatusCode, res.Status) - } - - if !strings.HasPrefix(res.Header.Get("Content-Type"), "application/ld+json") { - t.Fatalf("Response should have Content-Type: application/ld+json, got instead %s", res.Header.Get("Content-Type")) - } - - var service2 *Service - decoder := json.NewDecoder(res.Body) - defer res.Body.Close() - - err = decoder.Decode(&service2) - if err != nil { - t.Fatal(err.Error()) - } - - if !strings.HasPrefix(service2.URL, TestApiLocation) { - t.Fatalf("Service URL should have been prefixed with %v by catalog, retrieved URL: %v", TestApiLocation, service2.URL) - } - if !sameServices(service, service2, false) { - t.Fatalf("The retrieved service is not the same as the added one:\n Added:\n %v \n Retrieved: \n %v", service, service2) - } -} - -func TestUpdate(t *testing.T) { - router, shutdown, err := setupRouter() - if err != nil { - t.Fatal(err.Error()) - } - ts := httptest.NewServer(router) - defer ts.Close() - defer shutdown() - - service := mockedService("1") - b, _ := json.Marshal(service) - - // Create - url := ts.URL + TestApiLocation + "/" + service.Id - t.Log("Calling PUT", url) - res, err := httpPut(url, bytes.NewReader(b)) - if err != nil { - t.Fatal(err.Error()) - } - - // Update - service2 := mockedService("1") - service2.Description = "Updated Test Service" - b, _ = json.Marshal(service2) - - t.Log("Calling PUT", url) - res, err = httpPut(url, bytes.NewReader(b)) - if err != nil { - t.Fatal(err.Error()) - } - - if res.StatusCode != http.StatusOK { - t.Fatalf("Server should return %v, got instead: %v (%s)", http.StatusOK, res.StatusCode, res.Status) - } - - // Retrieve & compare - t.Log("Calling GET", url) - 
res, err = http.Get(url) - if err != nil { - t.Fatal(err.Error()) - } - - var service3 *Service - decoder := json.NewDecoder(res.Body) - defer res.Body.Close() - - err = decoder.Decode(&service3) - if err != nil { - t.Fatal(err.Error()) - } - - if !sameServices(service2, service3, false) { - t.Fatalf("The retrieved service is not the same as the added one:\n Added:\n %v \n Retrieved: \n %v", service2, service3) - } - - // Create with user-defined ID (PUT for creation) - service4 := mockedService("1") - b, _ = json.Marshal(service4) - url = ts.URL + TestApiLocation + "/service123" - t.Log("Calling PUT", url) - res, err = httpPut(url, bytes.NewReader(b)) - if err != nil { - t.Fatal(err.Error()) - } - - if res.StatusCode != http.StatusCreated { - t.Fatalf("Server should return %v, got instead: %v (%s)", http.StatusCreated, res.StatusCode, res.Status) - } - - // Check if user-defined id is in response - location, err := res.Location() - if err != nil { - t.Fatal(err.Error()) - } - parts := strings.Split(location.String(), "/") - if parts[len(parts)-1] != "service123" { - t.Fatalf("User-defined id is not returned in location. Getting %v\n", location.String()) - } -} - -func TestDelete(t *testing.T) { - router, shutdown, err := setupRouter() - if err != nil { - t.Fatal(err.Error()) - } - ts := httptest.NewServer(router) - defer ts.Close() - defer shutdown() - - service := mockedService("1") - b, _ := json.Marshal(service) - - // Create - url := ts.URL + TestApiLocation + "/" + service.Id - t.Log("Calling PUT", url) - res, err := httpPut(url, bytes.NewReader(b)) - if err != nil { - t.Fatal(err.Error()) - } - - // Delete - t.Log("Calling DELETE", url) - req, err := http.NewRequest("DELETE", url, bytes.NewReader([]byte{})) - if err != nil { - t.Fatal(err.Error()) - } - res, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatal(err.Error()) - } - - if res.StatusCode != http.StatusOK { - t.Fatalf("Server should return %v, got instead: %v (%s)", http.StatusOK, res.StatusCode, res.Status) - } - - // Retrieve whole collection - t.Log("Calling GET", ts.URL+TestApiLocation) - res, err = http.Get(ts.URL + TestApiLocation) - if err != nil { - t.Fatal(err.Error()) - } - - var collection *Collection - decoder := json.NewDecoder(res.Body) - defer res.Body.Close() - - err = decoder.Decode(&collection) - - if err != nil { - t.Fatal(err.Error()) - } - - if collection.Total != 0 { - t.Fatal("Server should return an empty collection, but got total", collection.Total) - } - -} - -func TestFilter(t *testing.T) { - router, shutdown, err := setupRouter() - if err != nil { - t.Fatal(err.Error()) - } - ts := httptest.NewServer(router) - defer ts.Close() - defer shutdown() - - // create 3 services - service1 := mockedService("1") - service2 := mockedService("2") - service3 := mockedService("3") - - // Add - url := ts.URL + TestApiLocation + "/" - for _, s := range []*Service{service1, service2, service3} { - s.Id = "" - b, _ := json.Marshal(s) - - _, err := http.Post(url, "application/ld+json", bytes.NewReader(b)) - if err != nil { - t.Fatal(err.Error()) - } - } - - // Services - // Filter many - url = ts.URL + TestApiLocation + "/name/" + FOpPrefix + "/" + "Test" - t.Log("Calling GET", url) - res, err := http.Get(url) - if err != nil { - t.Fatal(err.Error()) - } - defer res.Body.Close() - - var collection *Collection - decoder := json.NewDecoder(res.Body) - err = decoder.Decode(&collection) - if err != nil { - t.Fatal(err.Error()) - } - - if collection.Total != 3 { - t.Fatal("Server should return a collection of 
*3* resources, but got total", collection.Total) - } - - // Filter one - url = ts.URL + TestApiLocation + "/name/" + FOpEquals + "/" + service1.Name - t.Log("Calling GET", url) - res, err = http.Get(url) - if err != nil { - t.Fatal(err.Error()) - } - defer res.Body.Close() - - var collection2 *Collection - decoder2 := json.NewDecoder(res.Body) - err = decoder2.Decode(&collection2) - if err != nil { - t.Fatal(err.Error()) - } - - if !sameServices(service1, &collection2.Services[0], false) { - t.Fatalf("The retrieved service is not the same as the added one:\n Added:\n %v \n Retrieved: \n %v", service1, collection2.Services[0]) - } -} - -func httpPut(url string, r *bytes.Reader) (*http.Response, error) { - req, err := http.NewRequest("PUT", url, r) - if err != nil { - return nil, err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - return res, nil -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/client.go b/vendor/code.linksmart.eu/sc/service-catalog/service/client.go deleted file mode 100644 index 7888c608..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/client.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -// ServiceConfig is a wrapper for Service to be used by -// clients to configure a Service (e.g., read from file) -type ServiceConfig struct { - *Service - Host string -} - -// Returns a Service object from the ServiceConfig -func (sc *ServiceConfig) GetService() (*Service, error) { - sc.Id = sc.Host + "/" + sc.Name - if err := sc.Service.validate(); err != nil { - return nil, err - } - return sc.Service, nil -} - -// Catalog client -type CatalogClient interface { - // CRUD - Get(id string) (*Service, error) - Add(s *Service) (string, error) - Update(id string, s *Service) error - Delete(id string) error - - // Returns a slice of Services given: - // page - page in the collection - // perPage - number of entries per page - List(page, perPage int) ([]Service, int, error) - - // Returns a slice of Services given: path, operation, value, page, perPage - Filter(path, op, value string, page, perPage int) ([]Service, int, error) -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/constants.go b/vendor/code.linksmart.eu/sc/service-catalog/service/constants.go deleted file mode 100644 index 801fecac..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/constants.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -const ( - DNSSDServiceType = "_linksmart-sc._tcp" - MaxPerPage = 100 - ApiVersion = "1.0.0" - ApiCollectionType = "ServiceCatalog" - ApiRegistrationType = "Service" - loggerPrefix = "[sc] " - - // MetaKeyGCExpose is the meta key indicating - // that the service needs to be tunneled in GC - MetaKeyGCExpose = "gc_expose" - - CatalogBackendMemory = "memory" - CatalogBackendLevelDB = "leveldb" - StaticLocation = "/static" -) diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/controller.go b/vendor/code.linksmart.eu/sc/service-catalog/service/controller.go deleted file mode 100644 index 106e69f6..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/controller.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "fmt" - "strings" - "sync" - "time" - - 
"code.linksmart.eu/sc/service-catalog/utils" - avl "github.com/ancientlore/go-avltree" -) - -type Controller struct { - wg sync.WaitGroup - sync.RWMutex - storage CatalogStorage - apiLocation string - listeners []Listener - ticker *time.Ticker - - // startTime and counter for ID generation - startTime int64 - counter int64 - - // sorted expiryTime->serviceID maps - exp_sid *avl.Tree -} - -func NewController(storage CatalogStorage, apiLocation string, listeners ...Listener) (CatalogController, error) { - c := Controller{ - storage: storage, - apiLocation: apiLocation, - exp_sid: avl.New(timeKeys, avl.AllowDuplicates), // allows more than one service with the same expiry time - startTime: time.Now().UTC().Unix(), - listeners: listeners, - } - - // Initialize secondary indices (if a persistent storage backend is present) - err := c.initIndices() - if err != nil { - return nil, err - } - - c.ticker = time.NewTicker(5 * time.Second) - go c.cleanExpired() - - return &c, nil -} - -func (c *Controller) add(s Service) (string, error) { - if err := s.validate(); err != nil { - return "", &BadRequestError{err.Error()} - } - - c.Lock() - defer c.Unlock() - - if s.Id == "" { - // System generated id - s.Id = c.newURN() - } - s.URL = fmt.Sprintf("%s/%s", c.apiLocation, s.Id) - s.Type = ApiRegistrationType - s.Created = time.Now().UTC() - s.Updated = s.Created - if s.Ttl == 0 { - s.Expires = nil - } else { - expires := s.Created.Add(time.Duration(s.Ttl) * time.Second) - s.Expires = &expires - } - - err := c.storage.add(&s) - if err != nil { - return "", err - } - - // Add secondary indices - c.addIndices(&s) - - // notify listeners - for _, l := range c.listeners { - go l.added(s) - } - - return s.Id, nil -} - -func (c *Controller) get(id string) (*Service, error) { - return c.storage.get(id) -} - -func (c *Controller) update(id string, s Service) error { - if err := s.validate(); err != nil { - return &BadRequestError{err.Error()} - } - - c.Lock() - defer c.Unlock() - - // Get the stored service - ss, err := c.storage.get(id) - if err != nil { - return err - } - - // Shallow copy - var cp Service = *ss - - ss.Name = s.Name - ss.Description = s.Description - ss.Meta = s.Meta - ss.Ttl = s.Ttl - ss.Updated = time.Now().UTC() - if ss.Ttl == 0 { - ss.Expires = nil - } else { - expires := ss.Updated.Add(time.Duration(ss.Ttl) * time.Second) - ss.Expires = &expires - } - - err = c.storage.update(id, ss) - if err != nil { - return err - } - - // Update secondary indices - c.removeIndices(&cp) - c.addIndices(ss) - - // notify listeners - for _, l := range c.listeners { - go l.updated(s) - } - - return nil -} - -func (c *Controller) delete(id string) error { - c.Lock() - defer c.Unlock() - - old, err := c.storage.get(id) - if err != nil { - return err - } - - err = c.storage.delete(id) - if err != nil { - return err - } - - // Remove secondary indices - c.removeIndices(old) - - // notify listeners - for _, l := range c.listeners { - go l.deleted(old.Id) - } - - return nil -} - -func (c *Controller) list(page, perPage int) ([]Service, int, error) { - return c.storage.list(page, perPage) -} - -func (c *Controller) filter(path, op, value string, page, perPage int) ([]Service, int, error) { - c.RLock() - defer c.RUnlock() - - matches := make([]Service, 0) - pp := MaxPerPage - for p := 1; ; p++ { - services, t, err := c.storage.list(p, pp) - if err != nil { - return nil, 0, err - } - - for i := range services { - matched, err := utils.MatchObject(services[i], strings.Split(path, "."), op, value) - if err != nil { - 
return nil, 0, err - } - if matched { - matches = append(matches, services[i]) - } - } - - if p*pp >= t { - break - } - } - // Pagination - offset, limit, err := utils.GetPagingAttr(len(matches), page, perPage, MaxPerPage) - if err != nil { - return nil, 0, &BadRequestError{fmt.Sprintf("Unable to paginate: %s", err)} - } - // Return the page - return matches[offset : offset+limit], len(matches), nil -} - -func (c *Controller) total() (int, error) { - return c.storage.total() -} - -func (c *Controller) cleanExpired() { - for t := range c.ticker.C { - c.Lock() - - var expiredList []Map - for m := range c.exp_sid.Iter() { - if !m.(Map).key.(time.Time).After(t.UTC()) { - expiredList = append(expiredList, m.(Map)) - } else { - // exp_did is sorted by time ascendingly: its elements expire in order - break - } - } - - for _, m := range expiredList { - id := m.value.(string) - logger.Printf("cleanExpired() Registration %v has expired\n", id) - - old, err := c.storage.get(id) - if err != nil { - logger.Printf("cleanExpired() Error retrieving device %v: %v\n", id, err.Error()) - break - } - - err = c.storage.delete(id) - if err != nil { - logger.Printf("cleanExpired() Error removing device %v: %v\n", id, err.Error()) - break - } - // Remove secondary indices - c.removeIndices(old) - } - - c.Unlock() - } -} - -// Stop the controller -func (c *Controller) Stop() error { - c.ticker.Stop() - return c.storage.Close() -} - -// UTILITY FUNCTIONS - -// Generate a new unique urn for service -// Format: urn:ls_service:id, where id is the timestamp(s) of the controller startTime+counter in hex -// WARNING: the caller must obtain the lock before calling -func (c *Controller) newURN() string { - c.counter++ - return fmt.Sprintf("urn:ls_service:%x", c.startTime+c.counter) -} - -// Initialize secondary indices (from a persistent storage backend) -func (c *Controller) initIndices() error { - perPage := MaxPerPage - for page := 1; ; page++ { - devices, total, err := c.storage.list(page, perPage) - if err != nil { - return err - } - - for i, _ := range devices { - c.addIndices(&devices[i]) - } - - if page*perPage >= total { - break - } - } - return nil -} - -// Creates secondary indices -// WARNING: the caller must obtain the lock before calling -func (c *Controller) addIndices(s *Service) { - - // Add expiry time index - if s.Ttl != 0 { - c.exp_sid.Add(Map{*s.Expires, s.Id}) - } -} - -// Removes secondary indices -// WARNING: the caller must obtain the lock before calling -func (c *Controller) removeIndices(s *Service) { - - // Remove the expiry time index - // INFO: - // More than one service can have the same expiry time (i.e. map's key) - // which leads to non-unique keys in the maps. - // This code removes keys with that expiry time (keeping them in a temp) until the - // desired target is reached. It then adds the items in the temp back to the tree. 
- if s.Ttl != 0 { - var temp []Map - for m := range c.exp_sid.Iter() { - id := m.(Map).value.(string) - if id == s.Id { - for { // go through all duplicates (same expiry times) - r := c.exp_sid.Remove(m) - if r == nil { - break - } - if id != s.Id { - temp = append(temp, r.(Map)) - } - } - break - } - } - for _, r := range temp { - c.exp_sid.Add(r) - } - } -} - -// AVL Tree: sorted nodes according to keys -// -// A node of the AVL Tree (go-avltree) -type Map struct { - key interface{} - value interface{} -} - -// Operator for Time-type key -func timeKeys(a interface{}, b interface{}) int { - if a.(Map).key.(time.Time).Before(b.(Map).key.(time.Time)) { - return -1 - } else if a.(Map).key.(time.Time).After(b.(Map).key.(time.Time)) { - return 1 - } - return 0 -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/controller_test.go b/vendor/code.linksmart.eu/sc/service-catalog/service/controller_test.go deleted file mode 100644 index 28160513..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/controller_test.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/pborman/uuid" -) - -func setup() (CatalogController, func(), error) { - var ( - storage CatalogStorage - err error - tempDir string = fmt.Sprintf("%s/lslc/test-%s.ldb", - strings.Replace(os.TempDir(), "\\", "/", -1), uuid.New()) - ) - switch TestStorageType { - case CatalogBackendMemory: - storage = NewMemoryStorage() - case CatalogBackendLevelDB: - storage, err = NewLevelDBStorage(tempDir, nil) - if err != nil { - return nil, nil, err - } - } - - controller, err := NewController(storage, TestApiLocation) - if err != nil { - storage.Close() - return nil, nil, err - } - - return controller, func() { - controller.Stop() - os.RemoveAll(tempDir) // Remove temp files - }, nil -} - -func TestAddService(t *testing.T) { - t.Log(TestStorageType) - controller, shutdown, err := setup() - if err != nil { - t.Fatal(err.Error()) - } - defer shutdown() - - // User-defined id - var r Service - uuid := "E9203BE9-D705-42A8-8B12-F28E7EA2FC99" - r.Name = "ServiceName" - r.Id = uuid + "/" + r.Name - r.Ttl = 30 - r.Protocols = []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}} - - id, err := controller.add(r) - if err != nil { - t.Fatalf("Unexpected error on add: %v", err.Error()) - } - if id != r.Id { - t.Fatalf("User defined ID is not returned. Getting %v instead of %v\n", id, r.Id) - } - - _, err = controller.add(r) - if err == nil { - t.Error("Didn't get any error when adding a service with non-unique id.") - } - - // System-generated id - var r2 Service - r2.Name = "ServiceName" - r2.Protocols = []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}} - - id, err = controller.add(r2) - if err != nil { - t.Fatalf("Unexpected error on add: %v", err.Error()) - } - if !strings.HasPrefix(id, "urn:ls_service:") { - t.Fatalf("System-generated URN doesn't have `urn:ls_service:` as prefix. 
Getting location: %v\n", id) - } -} - -func TestUpdateService(t *testing.T) { - t.Log(TestStorageType) - controller, shutdown, err := setup() - if err != nil { - t.Fatal(err.Error()) - } - defer shutdown() - - var r Service - uuid := "E9203BE9-D705-42A8-8B12-F28E7EA2FC99" - r.Name = "ServiceName" - r.Id = uuid + "/" + r.Name - r.Ttl = 30 - r.Protocols = []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}} - - _, err = controller.add(r) - if err != nil { - t.Errorf("Unexpected error on add: %v", err.Error()) - } - r.Name = "UpdatedName" - - err = controller.update(r.Id, r) - if err != nil { - t.Errorf("Unexpected error on update: %v", err.Error()) - } - - rg, err := controller.get(r.Id) - if err != nil { - t.Error("Unexpected error on get: %v", err.Error()) - } - - if rg.Name != r.Name { - t.Fail() - } -} - -func TestGetService(t *testing.T) { - t.Log(TestStorageType) - controller, shutdown, err := setup() - if err != nil { - t.Fatal(err.Error()) - } - defer shutdown() - - r := Service{ - Name: "TestName", - } - uuid := "E9203BE9-D705-42A8-8B12-F28E7EA2FC99" - r.Name = "ServiceName" - r.Id = uuid + "/" + r.Name - r.Ttl = 30 - r.Protocols = []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}} - - _, err = controller.add(r) - if err != nil { - t.Errorf("Unexpected error on add: %v", err.Error()) - } - - rg, err := controller.get(r.Id) - if err != nil { - t.Error("Unexpected error on get: %v", err.Error()) - } - - if rg.Id != r.Id || rg.Name != r.Name || rg.Ttl != r.Ttl { - t.Fail() - } -} - -func TestDeleteService(t *testing.T) { - t.Log(TestStorageType) - controller, shutdown, err := setup() - if err != nil { - t.Fatal(err.Error()) - } - defer shutdown() - - var r Service - uuid := "E9203BE9-D705-42A8-8B12-F28E7EA2FC99" - r.Name = "ServiceName" - r.Id = uuid + "/" + r.Name - r.Ttl = 30 - r.Protocols = []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}} - - _, err = controller.add(r) - if err != nil { - t.Errorf("Unexpected error on add: %v", err.Error()) - } - - err = controller.delete(r.Id) - if err != nil { - t.Error("Unexpected error on delete: %v", err.Error()) - } - - err = controller.delete(r.Id) - if err == nil { - t.Error("Didn't get any error when deleting a deleted service.") - } -} - -func TestListServices(t *testing.T) { - t.Log(TestStorageType) - controller, shutdown, err := setup() - if err != nil { - t.Fatal(err.Error()) - } - defer shutdown() - - var r Service - - // Add 10 entries - for i := 0; i < 11; i++ { - r.Name = string(i) - r.Id = "TestID" + "/" + r.Name - r.Ttl = 30 - r.Protocols = []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}} - _, err := controller.add(r) - - if err != nil { - t.Errorf("Unexpected error on add: %v", err.Error()) - } - } - - p1pp2, total, _ := controller.list(1, 2) - if total != 11 { - t.Errorf("Expected total is 11, returned: %v", total) - } - - if len(p1pp2) != 2 { - t.Errorf("Wrong number of entries: requested page=1 , perPage=2. Expected: 2, returned: %v", len(p1pp2)) - } - - p2pp2, _, _ := controller.list(2, 2) - if len(p2pp2) != 2 { - t.Errorf("Wrong number of entries: requested page=2 , perPage=2. Expected: 2, returned: %v", len(p2pp2)) - } - - p2pp5, _, _ := controller.list(2, 5) - if len(p2pp5) != 5 { - t.Errorf("Wrong number of entries: requested page=2 , perPage=5. 
Expected: 5, returned: %v", len(p2pp5)) - } - - p4pp3, _, _ := controller.list(4, 3) - if len(p4pp3) != 2 { - t.Errorf("Wrong number of entries: requested page=4 , perPage=3. Expected: 2, returned: %v", len(p4pp3)) - } -} - -func TestFilterService(t *testing.T) { - t.Log(TestStorageType) - controller, shutdown, err := setup() - if err != nil { - t.Fatal(err.Error()) - } - defer shutdown() - - for i := 0; i < 5; i++ { - _, err := controller.add(Service{ - Name: fmt.Sprintf("boring_%d", i), - Protocols: []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}}, - }) - if err != nil { - t.Fatal("Error adding a service:", err.Error()) - } - } - - controller.add(Service{ - Name: "interesting_1", - Protocols: []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}}, - }) - controller.add(Service{ - Name: "interesting_2", - Protocols: []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}}, - }) - - services, total, err := controller.filter("name", "prefix", "interesting", 1, 10) - if err != nil { - t.Fatal("Error filtering services:", err.Error()) - } - if total != 2 { - t.Fatalf("Returned %d instead of 2 services when filtering name/prefix/interesting: \n%v", total, services) - } - for _, s := range services { - if !strings.Contains(s.Name, "interesting") { - t.Fatal("Wrong results when filtering name/prefix/interesting:\n", s) - } - } -} - -func TestCleanExpired(t *testing.T) { - t.Log(TestStorageType) - controller, shutdown, err := setup() - if err != nil { - t.Fatal(err.Error()) - } - defer shutdown() - - var d = Service{ - Name: "my_service", - Ttl: 1, - Protocols: []Protocol{Protocol{Type: "REST", Endpoint: map[string]interface{}{"url": "http://localhost:9000/api"}}}, - } - - id, err := controller.add(d) - if err != nil { - t.Fatal("Error adding a service:", err.Error()) - } - - addingTime := time.Now() - time.Sleep(6 * time.Second) - - checkingTime := time.Now() - dd, err := controller.get(id) - if err != nil { - switch err.(type) { - case *NotFoundError: - // good - default: - t.Fatalf("Got an error other than NotFoundError when getting an expired service: %s\n", err) - } - } else { - t.Fatalf("Service was not removed after 1 seconds. \nTTL: %v \nCreated: %v \nExpiry: %v \nNot deleted after: %v at %v\n", - dd.Ttl, - dd.Created, - dd.Expires, - checkingTime.Sub(addingTime), - checkingTime.UTC(), - ) - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/doc.go b/vendor/code.linksmart.eu/sc/service-catalog/service/doc.go deleted file mode 100644 index 7281bfff..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -// Package catalog/service implements storage backend, RESTful API handlers, -// and a client SDK for service catalog implementation. 
-package service diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/errors.go b/vendor/code.linksmart.eu/sc/service-catalog/service/errors.go deleted file mode 100644 index 15ccf7c5..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/errors.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "encoding/json" - "net/http" - "strings" -) - -// Not Found -type NotFoundError struct{ s string } - -func (e *NotFoundError) Error() string { return e.s } - -// Conflict (non-unique id, assignment to read-only data) -type ConflictError struct{ s string } - -func (e *ConflictError) Error() string { return e.s } - -// Bad Request -type BadRequestError struct{ s string } - -func (e *BadRequestError) Error() string { return e.s } - -// Error describes an API error (serializable in JSON) -type Error struct { - // Code is the (http) code of the error - Code int `json:"code"` - // Message is the (human-readable) error message - Message string `json:"message"` -} - -// ErrorResponse writes error to HTTP ResponseWriter -func ErrorResponse(w http.ResponseWriter, code int, msgs ...string) { - msg := strings.Join(msgs, " ") - e := &Error{ - code, - msg, - } - if code >= 500 { - logger.Println("ERROR:", msg) - } - b, _ := json.Marshal(e) - w.Header().Set("Content-Type", "application/json;version="+ApiVersion) - w.WriteHeader(code) - w.Write(b) -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/gcpublisher.go b/vendor/code.linksmart.eu/sc/service-catalog/service/gcpublisher.go deleted file mode 100644 index 39f14478..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/gcpublisher.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "sync" - "time" -) - -const ( - // HTTPBackboneName is the fully qualified java class name for HTTP backbone used in LS GC - HTTPBackboneName = "eu.linksmart.gc.network.backbone.protocol.http.HttpImpl" - // ProtocolTypeREST used in LC Service registration for Web/HTTP services - ProtocolTypeREST = "REST" - // RESTEndpointURL is the key in Protocol.Endpoint describing the endpoint of an Web/HTTP service - RESTEndpointURL = "url" - // ServiceAttributeRegistration is the attribute key for TunneledService storing the (modified) Service object - ServiceAttributeRegistration = "SC" - // ServiceAttributeID is the attribute key for TunneledService storing the Service ID - ServiceAttributeID = "SID" - // ServiceAttributeDescription is the attribute key for TunneledService storing the Service description - ServiceAttributeDescription = "DESCRIPTION" -) - -// GCPublisher is a catalog Listener publishing catalog updates to the GlobalConnect -type GCPublisher struct { - serviceEndpoint url.URL - services map[string]syncedService - mutex sync.RWMutex -} - -// TunneledService describes a service tunneled with LinkSmart GC -type TunneledService struct { - Endpoint string `json:"Endpoint"` - BackboneName string `json:"BackboneName"` - VirtualAddress string `json:"VirtualAddress,omitempty"` // set in responses and used for DELETE - Attributes map[string]interface{} `json:"Attributes"` -} - -type syncedService struct { - vad string // LSGC Virtual Address - service Service // LSLC Service - lasSync time.Time // timestamp of the last sync -} - -// NewTunneledService creates a TunneledService 
given a Service -func NewTunneledService(s *Service, vad string) (*TunneledService, error) { - // check if REST protocol is configured - var restProtocol Protocol - for _, p := range s.Protocols { - if p.Type == ProtocolTypeREST { - restProtocol = p - break - } - } - if restProtocol.Type == "" { - return nil, fmt.Errorf("Service without a configured REST protocol. Cannot be tunneled in GC") - } - - // check if REST protocol has endpoint defined - endpoint, ok := restProtocol.Endpoint[RESTEndpointURL] - if !ok { - return nil, fmt.Errorf("Service with a misconfigured REST protocol (no 'url'). Cannot be tunneled in GC") - } - endpointURL, _ := endpoint.(string) - - return &TunneledService{ - Endpoint: endpointURL, - BackboneName: HTTPBackboneName, - Attributes: map[string]interface{}{ - ServiceAttributeID: s.Id, - ServiceAttributeDescription: s.Description, - ServiceAttributeRegistration: *s, - }, - VirtualAddress: vad, - }, nil - -} - -func tunneledServiceFromResponse(res *http.Response) (*TunneledService, error) { - decoder := json.NewDecoder(res.Body) - defer res.Body.Close() - - var ts *TunneledService - err := decoder.Decode(&ts) - if err != nil { - return nil, err - } - return ts, nil -} - -func (l *GCPublisher) added(s Service) { - if !s.isGCTunnelable() { - logger.Printf("Ignoring service that cannot be tunneled in GC: %v\n", s.Id) - return - } - - s.Type = ApiRegistrationType - l.mutex.Lock() - ssvc, ok := l.services[s.Id] - // create new service entry if doesn't exist (actually shouldn't) - if !ok { - ssvc = syncedService{ - service: s, - } - } - - // create tunneled service - tsvc, err := NewTunneledService(&ssvc.service, "") - if err != nil { - logger.Printf("Error creating GC Tunneled Service from the given Service: %v\n", err.Error()) - l.mutex.Unlock() - return - } - - // register in the GC - b, _ := json.Marshal(tsvc) - res, err := http.Post(l.serviceEndpoint.String(), "application/json", bytes.NewReader(b)) - - if err != nil { - logger.Printf("Error publishing new Service in GC: %v\n", err.Error()) - l.mutex.Unlock() - return - } - - // FIXME: should return http.StatusCreated (201)! - if res.StatusCode != http.StatusOK { - logger.Printf("Error publishing new Service in GC. Tunneling Service returns: %v\n", res.StatusCode) - l.mutex.Unlock() - return - } - - // Synced successfully - ssvc.lasSync = time.Now() - - // Parse the response to retrieve VAD - ts, err := tunneledServiceFromResponse(res) - if err != nil { - logger.Printf("Error parsing the Tunneling Service response: %v\n", err.Error()) - l.mutex.Unlock() - return - } - ssvc.vad = ts.VirtualAddress - logger.Printf("Published service %v in the GC, VAD: %v\n", s.Id, ssvc.vad) - - l.services[s.Id] = ssvc - l.mutex.Unlock() -} - -func (l *GCPublisher) updated(s Service) { - //////////////////////////////////////////// - // DISABLED FOR THE TIME BEING - // NM currently assigns a new VAD on update, which effectively has no other effect - // For now it makes more sense to skip update altogether. 
See LSGC-146 - //////////////////////////////////////////// - - // l.mutex.Lock() - // // check if service is known - // ssvc, ok := l.services[s.Id] - // if !ok { - // logger.Printf("Asked to update unknown service %v **will do nothing**", s.Id) - // } - // // replace the service - // ssvc.service = s - - // // create tunneled service - // tsvc, err := NewTunneledService(&ssvc.service, ssvc.vad) - // if err != nil { - // logger.Printf("Error creating GC Tunneled Service from the given Service: %v\n", err.Error()) - // l.mutex.Unlock() - // return - // } - - // // update in the GC - // b, _ := json.Marshal(tsvc) - // req, _ := http.NewRequest("PUT", l.serviceEndpoint.String(), bytes.NewReader(b)) - // res, err := http.DefaultClient.Do(req) - - // if err != nil { - // logger.Printf("Error updating Service in GC: %v\n", err.Error()) - // l.mutex.Unlock() - // return - // } - - // if res.StatusCode != http.StatusOK { - // logger.Printf("Error updating Service in GC. Tunneling Service returns: %v\n", res.StatusCode) - // l.mutex.Unlock() - // return - // } - - // // Synced successfully - // ssvc.lasSync = time.Now() - - // // Parse the response to retrieve VAD - // ts, err := tunneledServiceFromResponse(res) - // if err != nil { - // logger.Printf("Error parsing the Tunneling Service response: %v\n", err.Error()) - // l.mutex.Unlock() - // return - // } - // ssvc.vad = ts.VirtualAddress - // logger.Printf("Updated service %v in the GC, VAD: %v\n", s.Id, ssvc.vad) - - // l.services[s.Id] = ssvc - // l.mutex.Unlock() -} - -func (l *GCPublisher) deleted(id string) { - l.mutex.Lock() - // check if service is known - ssvc, ok := l.services[id] - - if !ok { - logger.Printf("Asked to delete unknown (not tunnellable?) service %v **will do nothing**", id) - l.mutex.Unlock() - return - } - - // delete in the GC - req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/%s", l.serviceEndpoint.String(), ssvc.vad), bytes.NewReader([]byte{})) - res, err := http.DefaultClient.Do(req) - - if err != nil { - logger.Printf("Error deleting Service in GC: %v\n", err.Error()) - delete(l.services, id) - l.mutex.Unlock() - return - } - - if res.StatusCode != http.StatusOK { - logger.Printf("Error deleting Service in GC. 
Tunneling Service returns: %v\n", res.StatusCode) - delete(l.services, id) - l.mutex.Unlock() - return - } - - logger.Printf("Deleted service %v from the GC, VAD: %v\n", id, ssvc.vad) - - delete(l.services, id) - l.mutex.Unlock() -} - -// NewGCPublisher instantiates a GCPublisher -func NewGCPublisher(endpoint url.URL) *GCPublisher { - return &GCPublisher{ - serviceEndpoint: endpoint, - services: make(map[string]syncedService), - mutex: sync.RWMutex{}, - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/init.go b/vendor/code.linksmart.eu/sc/service-catalog/service/init.go deleted file mode 100644 index 8151f1a7..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/init.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "log" - "os" - "strconv" -) - -var logger *log.Logger - -func init() { - logger = log.New(os.Stdout, loggerPrefix, 0) - - v, err := strconv.Atoi(os.Getenv("DEBUG")) - if err == nil && v == 1 { - logger.SetFlags(log.Ltime | log.Lshortfile) - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/main_test.go b/vendor/code.linksmart.eu/sc/service-catalog/service/main_test.go deleted file mode 100644 index e3d143ab..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/main_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "os" - "testing" -) - -const ( - TestApiLocation = "/sc" - TestStaticLocation = "/static" -) - -var ( - TestSupportedBackends = map[string]bool{ - CatalogBackendMemory: true, - CatalogBackendLevelDB: true, - } - TestStorageType string -) - -func TestMain(m *testing.M) { - for b, supported := range TestSupportedBackends { - if supported { - TestStorageType = b - if m.Run() == 1 { - os.Exit(1) - } - } - } - os.Exit(0) -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/remoteclient.go b/vendor/code.linksmart.eu/sc/service-catalog/service/remoteclient.go deleted file mode 100644 index cde42015..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/remoteclient.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "code.linksmart.eu/com/go-sec/auth/obtainer" - "code.linksmart.eu/sc/service-catalog/utils" -) - -type RemoteCatalogClient struct { - serverEndpoint *url.URL - ticket *obtainer.Client -} - -func NewRemoteCatalogClient(serverEndpoint string, ticket *obtainer.Client) (CatalogClient, error) { - // Check if serverEndpoint is a correct URL - endpointUrl, err := url.Parse(serverEndpoint) - if err != nil { - return nil, fmt.Errorf("Error parsing catalog endpoint url: %s", err) - } - - return &RemoteCatalogClient{ - serverEndpoint: endpointUrl, - ticket: ticket, - }, nil -} - -// Retrieves a service -func (c *RemoteCatalogClient) Get(id string) (*Service, error) { - res, err := utils.HTTPRequest("GET", - fmt.Sprintf("%v/%v", c.serverEndpoint, id), - nil, - nil, - c.ticket, - ) - if err != nil { - return nil, err - } - defer res.Body.Close() - - switch res.StatusCode { - case http.StatusBadRequest: - return nil, &BadRequestError{ErrorMsg(res)} - case http.StatusConflict: - return nil, &ConflictError{ErrorMsg(res)} - case http.StatusNotFound: - return nil, &NotFoundError{ErrorMsg(res)} - default: - if 
res.StatusCode != http.StatusOK { - return nil, fmt.Errorf(ErrorMsg(res)) - } - } - - decoder := json.NewDecoder(res.Body) - var s *Service - err = decoder.Decode(&s) - if err != nil { - return nil, err - } - - return s, nil -} - -// Adds a service -func (c *RemoteCatalogClient) Add(s *Service) (string, error) { - id := s.Id - service := *s - service.Id = "" - b, _ := json.Marshal(service) - - var ( - res *http.Response - err error - ) - - if id == "" { // Let the system generate an id - res, err = utils.HTTPRequest("POST", - c.serverEndpoint.String()+"/", - map[string][]string{"Content-Type": []string{"application/ld+json"}}, - bytes.NewReader(b), - c.ticket, - ) - if err != nil { - return "", err - } - defer res.Body.Close() - - } else { // User-defined id - - // Check if id is unique - resGet, err := utils.HTTPRequest("GET", - fmt.Sprintf("%v/%v", c.serverEndpoint, id), - nil, - nil, - c.ticket, - ) - if err != nil { - return "", err - } - defer resGet.Body.Close() - - // Make sure registration is not found - // catch every status but http.StatusNotFound - switch resGet.StatusCode { - case http.StatusOK: - return "", &ConflictError{fmt.Sprintf("Device id %s is not unique.", id)} - case http.StatusBadRequest: - return "", &BadRequestError{ErrorMsg(resGet)} - case http.StatusConflict: - return "", &ConflictError{ErrorMsg(resGet)} - default: - if resGet.StatusCode != http.StatusNotFound { - return "", fmt.Errorf(ErrorMsg(resGet)) - } - } - - // Now add - res, err = utils.HTTPRequest("PUT", - fmt.Sprintf("%v/%v", c.serverEndpoint, id), - map[string][]string{"Content-Type": []string{"application/ld+json"}}, - bytes.NewReader(b), - c.ticket, - ) - if err != nil { - return "", err - } - defer res.Body.Close() - } - - switch res.StatusCode { - case http.StatusBadRequest: - return "", &BadRequestError{ErrorMsg(res)} - case http.StatusConflict: - return "", &ConflictError{ErrorMsg(res)} - case http.StatusNotFound: - return "", &NotFoundError{ErrorMsg(res)} - default: - if res.StatusCode != http.StatusCreated { - return "", fmt.Errorf(ErrorMsg(res)) - } - } - - location, err := res.Location() - if err != nil { - return "", err - } - id = strings.SplitAfterN(location.String(), "", 2)[1] - - return id, nil -} - -// Updates a service -func (c *RemoteCatalogClient) Update(id string, s *Service) error { - // Check if id is found - resGet, err := utils.HTTPRequest("GET", - fmt.Sprintf("%v/%v", c.serverEndpoint, id), - nil, - nil, - c.ticket, - ) - if err != nil { - return err - } - defer resGet.Body.Close() - - switch resGet.StatusCode { - case http.StatusBadRequest: - return &BadRequestError{ErrorMsg(resGet)} - case http.StatusConflict: - return &ConflictError{ErrorMsg(resGet)} - case http.StatusNotFound: - return &NotFoundError{ErrorMsg(resGet)} - default: - if resGet.StatusCode != http.StatusOK { - return fmt.Errorf(ErrorMsg(resGet)) - } - } - - b, _ := json.Marshal(s) - res, err := utils.HTTPRequest("PUT", - fmt.Sprintf("%v/%v", c.serverEndpoint, id), - map[string][]string{"Content-Type": []string{"application/ld+json"}}, - bytes.NewReader(b), - c.ticket, - ) - if err != nil { - return err - } - defer res.Body.Close() - - switch res.StatusCode { - case http.StatusBadRequest: - return &BadRequestError{ErrorMsg(res)} - case http.StatusConflict: - return &ConflictError{ErrorMsg(res)} - case http.StatusNotFound: - return &NotFoundError{ErrorMsg(res)} - default: - if res.StatusCode != http.StatusOK { - return fmt.Errorf(ErrorMsg(res)) - } - } - - return nil -} - -// Deletes a service -func (c 
*RemoteCatalogClient) Delete(id string) error { - res, err := utils.HTTPRequest("DELETE", - fmt.Sprintf("%v/%v", c.serverEndpoint, id), - nil, - bytes.NewReader([]byte{}), - c.ticket, - ) - if err != nil { - return err - } - defer res.Body.Close() - - switch res.StatusCode { - case http.StatusBadRequest: - return &BadRequestError{ErrorMsg(res)} - case http.StatusConflict: - return &ConflictError{ErrorMsg(res)} - case http.StatusNotFound: - return &NotFoundError{ErrorMsg(res)} - default: - if res.StatusCode != http.StatusOK { - return fmt.Errorf(ErrorMsg(res)) - } - } - - return nil -} - -// Retrieves a page from the service collection -func (c *RemoteCatalogClient) List(page, perPage int) ([]Service, int, error) { - res, err := utils.HTTPRequest("GET", - fmt.Sprintf("%v?%v=%v&%v=%v", - c.serverEndpoint, utils.GetParamPage, page, utils.GetParamPerPage, perPage), - nil, - nil, - c.ticket, - ) - if err != nil { - return nil, 0, err - } - defer res.Body.Close() - - switch res.StatusCode { - case http.StatusBadRequest: - return nil, 0, &BadRequestError{ErrorMsg(res)} - case http.StatusConflict: - return nil, 0, &ConflictError{ErrorMsg(res)} - case http.StatusNotFound: - return nil, 0, &NotFoundError{ErrorMsg(res)} - default: - if res.StatusCode != http.StatusOK { - return nil, 0, fmt.Errorf(ErrorMsg(res)) - } - } - - decoder := json.NewDecoder(res.Body) - var coll Collection - err = decoder.Decode(&coll) - if err != nil { - return nil, 0, err - } - - return coll.Services, len(coll.Services), nil -} - -// Filter services -func (c *RemoteCatalogClient) Filter(path, op, value string, page, perPage int) ([]Service, int, error) { - res, err := utils.HTTPRequest("GET", - fmt.Sprintf("%v/%v/%v/%v?%v=%v&%v=%v", - c.serverEndpoint, path, op, value, utils.GetParamPage, page, utils.GetParamPerPage, perPage), - nil, - nil, - c.ticket, - ) - if err != nil { - return nil, 0, err - } - defer res.Body.Close() - - switch res.StatusCode { - case http.StatusBadRequest: - return nil, 0, &BadRequestError{ErrorMsg(res)} - case http.StatusConflict: - return nil, 0, &ConflictError{ErrorMsg(res)} - case http.StatusNotFound: - return nil, 0, &NotFoundError{ErrorMsg(res)} - default: - if res.StatusCode != http.StatusOK { - return nil, 0, fmt.Errorf(ErrorMsg(res)) - } - } - - decoder := json.NewDecoder(res.Body) - var coll Collection - err = decoder.Decode(&coll) - if err != nil { - return nil, 0, err - } - - return coll.Services, len(coll.Services), nil -} - -// Returns the message field of a resource.Error response -func ErrorMsg(res *http.Response) string { - decoder := json.NewDecoder(res.Body) - - var e Error - err := decoder.Decode(&e) - if err != nil { - return res.Status - } - return fmt.Sprintf("(%d) %s", res.StatusCode, e.Message) -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/service-registrar.go b/vendor/code.linksmart.eu/sc/service-catalog/service/service-registrar.go deleted file mode 100644 index 05f21f79..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/service-registrar.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package service - -import ( - "fmt" - "sync" - "time" - - "code.linksmart.eu/com/go-sec/auth/obtainer" - discovery "code.linksmart.eu/sc/service-catalog/discovery" -) - -const ( - keepaliveRetries = 5 -) - -// Registers service given a configured Catalog Client -func RegisterService(client CatalogClient, s *Service) error { - err := client.Update(s.Id, s) - if err != nil { - switch 
err.(type) { - case *NotFoundError: - // If not in the catalog - add - _, err = client.Add(s) - if err != nil { - logger.Printf("RegisterService() Error adding registration: %v", err) - return err - } - logger.Printf("RegisterService() Added Service registration %v", s.Id) - default: - logger.Printf("RegisterService() Error updating registration: %v", err) - return err - } - } else { - logger.Printf("RegisterService() Updated Service registration %v", s.Id) - } - return nil -} - -// Registers service in the remote catalog -// endpoint: catalog endpoint. If empty - will be discovered using DNS-SD -// s: service registration -// sigCh: channel for shutdown signalisation from upstream -// ticket: set to nil for no auth -func RegisterServiceWithKeepalive(endpoint string, discover bool, s Service, - sigCh <-chan bool, wg *sync.WaitGroup, ticket *obtainer.Client) { - defer wg.Done() - var err error - if discover { - endpoint, err = discovery.DiscoverCatalogEndpoint(DNSSDServiceType) - if err != nil { - logger.Printf("RegisterServiceWithKeepalive() ERROR: Failed to discover the endpoint: %v", err.Error()) - return - } - } - - // Configure client - client, err := NewRemoteCatalogClient(endpoint, ticket) - if err != nil { - logger.Printf("RegisterServiceWithKeepalive() ERROR: Failed to create remote-catalog client: %v", err.Error()) - return - } - - // Will not keepalive registration with a negative TTL - if s.Ttl <= 0 { - logger.Println("RegisterServiceWithKeepalive() WARNING: Registration has ttl <= 0. Will not start the keepalive routine") - RegisterService(client, &s) - - // catch a shutdown signal from the upstream - for _ = range sigCh { - logger.Printf("RegisterServiceWithKeepalive() Removing the registration %v/%v...", endpoint, s.Id) - client.Delete(s.Id) - if ticket != nil { - err := ticket.Delete() - if err != nil { - logger.Printf("RegisterServiceWithKeepalive() Error while deleting the TGT: %v", err.Error()) - } - } - return - } - } - logger.Printf("RegisterServiceWithKeepalive() Will register and update registration periodically: %v/%v", endpoint, s.Id) - - // Configure & start the keepalive routine - ksigCh := make(chan bool) - kerrCh := make(chan error) - go keepAlive(client, &s, ksigCh, kerrCh) - - for { - select { - // catch an error from the keepAlive routine - case e := <-kerrCh: - logger.Println("RegisterServiceWithKeepalive() ERROR:", e) - // Re-discover the endpoint if needed and start over - if discover { - endpoint, err = discovery.DiscoverCatalogEndpoint(DNSSDServiceType) - if err != nil { - logger.Println("RegisterServiceWithKeepalive() ERROR:", err.Error()) - return - } - } - logger.Println("RegisterServiceWithKeepalive() Will use the new endpoint: ", endpoint) - client, err := NewRemoteCatalogClient(endpoint, ticket) - if err != nil { - logger.Printf("RegisterServiceWithKeepalive() ERROR: Failed to create remote-catalog client: %v", err.Error()) - return - } - go keepAlive(client, &s, ksigCh, kerrCh) - - // catch a shutdown signal from the upstream - case <-sigCh: - logger.Printf("RegisterServiceWithKeepalive() Removing the registration %v/%v...", endpoint, s.Id) - // signal shutdown to the keepAlive routine & close channels - select { - case ksigCh <- true: - // delete entry in the remote catalog - client.Delete(s.Id) - if ticket != nil { - err := ticket.Delete() - if err != nil { - logger.Printf("RegisterServiceWithKeepalive() Error while deleting the TGT: %v", err.Error()) - } - } - case <-time.After(1 * time.Second): - logger.Printf("RegisterDeviceWithKeepalive(): 
timeout removing registration %v/%v: catalog unreachable", endpoint, s.Id) - } - - close(ksigCh) - close(kerrCh) - return - } - } -} - -// Keep a given registration alive -// client: configured client for the remote catalog -// s: registration to be kept alive -// sigCh: channel for shutdown signalisation from upstream -// errCh: channel for error signalisation to upstream -func keepAlive(client CatalogClient, s *Service, sigCh <-chan bool, errCh chan<- error) { - dur := (time.Duration(s.Ttl) * time.Second) / 2 - ticker := time.NewTicker(dur) - errTries := 0 - - // Register - RegisterService(client, s) - - for { - select { - case <-ticker.C: - err := client.Update(s.Id, s) - if err != nil { - switch err.(type) { - case *NotFoundError: - // If not in the catalog - add - logger.Printf("keepAlive() ERROR: Registration %v not found in the remote catalog. TTL expired?", s.Id) - _, err = client.Add(s) - if err != nil { - logger.Printf("keepAlive() Error adding registration: %v", err) - errTries += 1 - } else { - logger.Printf("keepAlive() Added Service registration %v", s.Id) - errTries = 0 - } - default: - logger.Printf("keepAlive() Error updating registration: %v", err) - errTries += 1 - } - } else { - logger.Printf("keepAlive() Updated Service registration %v", s.Id) - errTries = 0 - } - - if errTries >= keepaliveRetries { - errCh <- fmt.Errorf("Number of retries exceeded") - ticker.Stop() - return - } - case <-sigCh: - // logger.Println("keepAlive routine shutdown signalled by the upstream") - return - } - } -} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/utils/init.go b/vendor/code.linksmart.eu/sc/service-catalog/utils/init.go deleted file mode 100644 index d6cdd718..00000000 --- a/vendor/code.linksmart.eu/sc/service-catalog/utils/init.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT - -package utils - -import ( - "log" - "os" - "strconv" -) - -var logger *log.Logger - -func init() { - logger = log.New(os.Stdout, "[utils] ", 0) - - v, err := strconv.Atoi(os.Getenv("DEBUG")) - if err == nil && v == 1 { - logger.SetFlags(log.Ltime | log.Lshortfile) - } -} diff --git a/vendor/github.com/ancientlore/go-avltree/.travis.yml b/vendor/github.com/ancientlore/go-avltree/.travis.yml index 255b7eeb..694974c7 100644 --- a/vendor/github.com/ancientlore/go-avltree/.travis.yml +++ b/vendor/github.com/ancientlore/go-avltree/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.5 + - 1.10.4 - tip before_install: diff --git a/vendor/github.com/ancientlore/go-avltree/go.mod b/vendor/github.com/ancientlore/go-avltree/go.mod new file mode 100644 index 00000000..33cbfaf3 --- /dev/null +++ b/vendor/github.com/ancientlore/go-avltree/go.mod @@ -0,0 +1 @@ +module github.com/ancientlore/go-avltree diff --git a/vendor/github.com/ancientlore/go-avltree/objecttree.go b/vendor/github.com/ancientlore/go-avltree/objecttree.go index 6b3b7ac3..05e69c03 100644 --- a/vendor/github.com/ancientlore/go-avltree/objecttree.go +++ b/vendor/github.com/ancientlore/go-avltree/objecttree.go @@ -1,14 +1,12 @@ package avltree -import () - // ObjectTree is a specialization of Tree that hides the wrapping of Elements around objects. -// The object just needs to implement Interface +// The object just needs to implement Interface. type ObjectTree struct { Tree } -// Implement Interface so that your object can be sorted in the tree +// Interface should be implemented so that your object can be sorted in the tree. 
type Interface interface { // Return -1 if this < b, 0 if this == b, and 1 if this > b Compare(b Interface) int @@ -18,13 +16,13 @@ func objectCompare(o1 interface{}, o2 interface{}) int { return o1.(Interface).Compare(o2.(Interface)) } -// Initialize or reset an ObjectTree +// Init will initialize or reset an ObjectTree. func (t *ObjectTree) Init(flags byte) *ObjectTree { t.Tree.Init(objectCompare, flags) return t } -// Return an initialized ObjectTree +// NewObjectTree returns an initialized ObjectTree. func NewObjectTree(flags byte) *ObjectTree { return new(ObjectTree).Init(flags) } // Find returns the element where the comparison function matches diff --git a/vendor/github.com/ancientlore/go-avltree/objecttree_test.go b/vendor/github.com/ancientlore/go-avltree/objecttree_test.go deleted file mode 100644 index e957039c..00000000 --- a/vendor/github.com/ancientlore/go-avltree/objecttree_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package avltree - -import ( - "fmt" - "math/rand" - "testing" -) - -type MyObject struct { - Key string - Value string -} - -func (o MyObject) Compare(b Interface) int { - if o.Key < b.(MyObject).Key { - return -1 - } - if o.Key > b.(MyObject).Key { - return 1 - } - return 0 -} - -func TestObjectTree(t *testing.T) { - - // no duplicates tests - - tree := NewObjectTree(0) - - if tree.Tree.root != nil { - t.Errorf("Initialized tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != 0 { - t.Errorf("Initialized tree flags should be zero: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != objectCompare { - // t.Errorf("Initialized tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe := tree.Add(MyObject{"foo", "bar"}) - - if v.(MyObject).Key != "foo" || dupe != false { - t.Errorf("Result of add should be foo/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0).(MyObject).Key != "foo" { - t.Errorf("Single value should be foo: %v\n", tree.At(0)) - } - - v, dupe = tree.Add(MyObject{"foo", "baz"}) - - if v.(MyObject).Key != "foo" || dupe != true { - t.Errorf("Result of add should be foo/true: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add(MyObject{"bar", "baz"}) - - if v.(MyObject).Key != "bar" || dupe != false { - t.Errorf("Result of add should be bar/false: %v/%v\n", v, dupe) - } - - if tree.At(1).(MyObject).Key != "foo" { - t.Errorf("Second value should be foo: %v\n", tree.At(0)) - } - - if tree.Len() != 2 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with two elements should be 2/3/2: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // reinitialize and allow duplicates - - tree.Init(AllowDuplicates) - - if tree.Tree.root != nil { - t.Errorf("Reinitialized tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != AllowDuplicates { - t.Errorf("Reinitialized tree flags should be AllowDuplicates: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != objectCompare { - // t.Errorf("Reinitialized tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized 
tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe = tree.Add(MyObject{"foo", "baz"}) - - if v.(MyObject).Key != "foo" || dupe != false { - t.Errorf("Result of add should be foo/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0).(MyObject).Key != "foo" { - t.Errorf("Single value should be foo: %v\n", tree.At(0)) - } - - v, dupe = tree.Add(MyObject{"foo", "baz"}) - - if v.(MyObject).Key != "foo" || dupe != false { - t.Errorf("Result of add should be foo/false: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add(MyObject{"bar", "baz"}) - - if v.(MyObject).Key != "bar" || dupe != false { - t.Errorf("Result of add should be bar/false: %v/%v\n", v, dupe) - } - - if tree.At(2).(MyObject).Key != "foo" { - t.Errorf("Third value should be foo: %v\n", tree.At(0)) - } - - if tree.Len() != 3 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with three elements should be 3/3/2: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // test Find - v = tree.Find(MyObject{"bar", ""}) - - if v.(MyObject).Key != "bar" { - t.Errorf("Find should locate bar: %v\n", v) - } - - v = tree.Find(MyObject{"foobar", ""}) - - if v != nil { - t.Errorf("Found an item that isn't there: %v\n", v) - } - - // test Data (slice) - slice := tree.Data() - - if len(slice) != 3 || slice[2].(MyObject).Key != "foo" { - t.Errorf("Slice is incorrect, expected 3/foo: %v/%v\n", len(slice), slice[2]) - } - - // test Do - - x := "" - tree.Do(func(z interface{}) bool { x += z.(MyObject).Key; return true }) - - if x != "barfoofoo" { - t.Errorf("Do function did not concat values correctly, expected barfoofoo: %d\n", x) - } - - // test Remove - - v, dupe = tree.Add(MyObject{"zoo", "berlin"}) - - if v.(MyObject).Key != "zoo" || dupe != false { - t.Errorf("Result of add should be zoo/false: %v/%v\n", v, dupe) - } - - v = tree.Remove(MyObject{"zoo", ""}) - - if v.(MyObject).Key != "zoo" || tree.Len() != 3 { - t.Errorf("Result of Remove should be zoo/3: %v/%v\n", v, tree.Len()) - } - - // test Iter - - tree.Add(MyObject{"zeek", ""}) - tree.Add(MyObject{"jim", ""}) - tree.Add(MyObject{"zope", ""}) - tree.Add(MyObject{"yikes", ""}) - - x = "aaaa" - for v := range tree.Iter() { - if v.(MyObject).Key < x { - t.Error("Iter expected", v.(MyObject).Key, "to be >", x) - } - x = v.(MyObject).Key - } - - if x != "zope" { - t.Errorf("Iter ran wrong number of elements, expected last element of zope, got %s\n", x) - } - - // test clear - - tree.Clear() - - if tree.Tree.root != nil { - t.Errorf("Cleared tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != AllowDuplicates { - t.Errorf("Cleared tree flags should still be AllowDuplicates: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != objectCompare { - // t.Errorf("Cleared tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Cleared tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // one more test, larger data - - for j := 0; j < 100000; j++ { - tree.Add(MyObject{fmt.Sprintf("%d", rand.Int()), "Hello"}) - } - - if tree.Len() != 100000 { - t.Errorf("Expected 100000 elements, got %d\n", tree.Len()) - } - - var prev string - prev 
= "" - - // make sure elements sorted - tree.Do(func(elem interface{}) bool { - var cur string - cur = elem.(MyObject).Key - if prev > cur { - t.Errorf("Elements not in order, previous = %d, current = %d\n", prev, cur) - } - prev = cur - return true - }) - -} diff --git a/vendor/github.com/ancientlore/go-avltree/pairtree.go b/vendor/github.com/ancientlore/go-avltree/pairtree.go index 50f86cba..96497dd4 100644 --- a/vendor/github.com/ancientlore/go-avltree/pairtree.go +++ b/vendor/github.com/ancientlore/go-avltree/pairtree.go @@ -9,13 +9,13 @@ type PairTree struct { ObjectTree } -// Pair structure holds your key and value +// Pair combines a key and value. type Pair struct { Key string Value interface{} } -// compare function for Pairs +// Compare is the compare function for Pairs, based on Key. func (a Pair) Compare(b Interface) int { if a.Key < b.(Pair).Key { return -1 @@ -25,19 +25,19 @@ func (a Pair) Compare(b Interface) int { return 0 } -// Iterate function +// PairIterateFunc is the type of function used for iterating across Pairs. type PairIterateFunc func(v Pair) bool -// Initialize or reset a StringTree +// Init will initialize or reset a PairTree. func (t *PairTree) Init(flags byte) *PairTree { t.ObjectTree.Init(flags) return t } -// Return an initialized StringTree +// NewPairTree returns an initialized PairTree. func NewPairTree(flags byte) *PairTree { return new(PairTree).Init(flags) } -// At returns the value at the given index +// At returns the value at the given index. func (t *PairTree) At(index int) *Pair { v := t.ObjectTree.At(index) if v != nil { @@ -48,7 +48,7 @@ func (t *PairTree) At(index int) *Pair { } // Find returns the element where the comparison function matches -// the node's value and the given key value +// the node's value and the given key value. func (t *PairTree) Find(key string) *Pair { v := t.ObjectTree.Find(Pair{key, nil}) if v != nil { @@ -71,7 +71,7 @@ func (t *PairTree) chanIterate(c chan<- Pair) { close(c) } -// Iter returns a channel you can read through to fetch all the items +// Iter returns a channel you can read through to fetch all the items. func (t *PairTree) Iter() <-chan Pair { c := make(chan Pair) go t.chanIterate(c) @@ -114,7 +114,7 @@ func (t *PairTree) Remove(ptr string) *Pair { return nil } -// Remove removes the element at the given index +// RemoveAt removes the element at the given index. func (t *PairTree) RemoveAt(index int) *Pair { v := t.ObjectTree.RemoveAt(index) if v != nil { @@ -124,7 +124,7 @@ func (t *PairTree) RemoveAt(index int) *Pair { return nil } -// Print the values in the tree +// Print prints the values in the tree to the writer. 
func (t *PairTree) Print(w io.Writer, f PairIterateFunc, itemSiz int) { t.ObjectTree.Print(w, func(v interface{}) bool { return f(v.(Pair)) }, itemSiz) } diff --git a/vendor/github.com/ancientlore/go-avltree/pairtree_test.go b/vendor/github.com/ancientlore/go-avltree/pairtree_test.go deleted file mode 100644 index ae14c6af..00000000 --- a/vendor/github.com/ancientlore/go-avltree/pairtree_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package avltree - -import ( - "fmt" - "math/rand" - "testing" -) - -func TestPairTree(t *testing.T) { - - // no duplicates tests - - tree := NewPairTree(0) - - if tree.Tree.root != nil { - t.Errorf("Initialized tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != 0 { - t.Errorf("Initialized tree flags should be zero: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != objectCompare { - // t.Errorf("Initialized tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe := tree.Add(Pair{"foo", "bar"}) - - if v.Key != "foo" || dupe != false { - t.Errorf("Result of add should be foo/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0).Key != "foo" { - t.Errorf("Single value should be foo: %v\n", tree.At(0)) - } - - v, dupe = tree.Add(Pair{"foo", "baz"}) - - if v.Key != "foo" || dupe != true { - t.Errorf("Result of add should be foo/true: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add(Pair{"bar", "baz"}) - - if v.Key != "bar" || dupe != false { - t.Errorf("Result of add should be bar/false: %v/%v\n", v, dupe) - } - - if tree.At(1).Key != "foo" { - t.Errorf("Second value should be foo: %v\n", tree.At(0)) - } - - if tree.Len() != 2 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with two elements should be 2/3/2: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // reinitialize and allow duplicates - - tree.Init(AllowDuplicates) - - if tree.Tree.root != nil { - t.Errorf("Reinitialized tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != AllowDuplicates { - t.Errorf("Reinitialized tree flags should be AllowDuplicates: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != objectCompare { - // t.Errorf("Reinitialized tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe = tree.Add(Pair{"foo", "baz"}) - - if v.Key != "foo" || dupe != false { - t.Errorf("Result of add should be foo/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0).Key != "foo" { - t.Errorf("Single value should be foo: %v\n", tree.At(0)) - } - - v, dupe = tree.Add(Pair{"foo", "baz"}) - - if v.Key != "foo" || dupe != false { - t.Errorf("Result of add should be foo/false: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add(Pair{"bar", "baz"}) - - if v.Key != "bar" || dupe != false { - 
t.Errorf("Result of add should be bar/false: %v/%v\n", v, dupe) - } - - if tree.At(2).Key != "foo" { - t.Errorf("Third value should be foo: %v\n", tree.At(0)) - } - - if tree.Len() != 3 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with three elements should be 3/3/2: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // test Find - v = tree.Find("bar") - - if v == nil || v.Key != "bar" { - t.Errorf("Find should locate bar: %v\n", v) - } - - v = tree.Find("foobar") - - if v != nil { - t.Errorf("Found an item that isn't there: %v\n", v) - } - - // test Data (slice) - slice := tree.Data() - - if len(slice) != 3 || slice[2].Key != "foo" { - t.Errorf("Slice is incorrect, expected 3/foo: %v/%v\n", len(slice), slice[2]) - } - - // test Do - - x := "" - tree.Do(func(z Pair) bool { x += z.Key; return true }) - - if x != "barfoofoo" { - t.Errorf("Do function did not concat values correctly, expected barfoofoo: %d\n", x) - } - - // test Remove - - v, dupe = tree.Add(Pair{"zoo", "berlin"}) - - if v.Key != "zoo" || dupe != false { - t.Errorf("Result of add should be zoo/false: %v/%v\n", v, dupe) - } - - v = tree.Remove("zoo") - - if v == nil || v.Key != "zoo" || tree.Len() != 3 { - t.Errorf("Result of Remove should be zoo/3: %v/%v\n", v, tree.Len()) - } - - // test Iter - - tree.Add(Pair{"zeek", ""}) - tree.Add(Pair{"jim", ""}) - tree.Add(Pair{"zope", ""}) - tree.Add(Pair{"yikes", ""}) - - x = "aaaa" - for v := range tree.Iter() { - if v.Key < x { - t.Error("Iter expected", v.Key, "to be >", x) - } - x = v.Key - } - - if x != "zope" { - t.Errorf("Iter ran wrong number of elements, expected last element of zope, got %s\n", x) - } - - // test clear - - tree.Clear() - - if tree.Tree.root != nil { - t.Errorf("Cleared tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != AllowDuplicates { - t.Errorf("Cleared tree flags should still be AllowDuplicates: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != objectCompare { - // t.Errorf("Cleared tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Cleared tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // one more test, larger data - - for j := 0; j < 100000; j++ { - tree.Add(Pair{fmt.Sprintf("%d", rand.Int()), "Hello"}) - } - - if tree.Len() != 100000 { - t.Errorf("Expected 100000 elements, got %d\n", tree.Len()) - } - - var prev string - prev = "" - - // make sure elements sorted - tree.Do(func(elem Pair) bool { - var cur string - cur = elem.Key - if prev > cur { - t.Errorf("Elements not in order, previous = %d, current = %d\n", prev, cur) - } - prev = cur - return true - }) - -} diff --git a/vendor/github.com/ancientlore/go-avltree/stringtree.go b/vendor/github.com/ancientlore/go-avltree/stringtree.go index abdc2582..5a17cba5 100644 --- a/vendor/github.com/ancientlore/go-avltree/stringtree.go +++ b/vendor/github.com/ancientlore/go-avltree/stringtree.go @@ -18,19 +18,19 @@ func stringCompare(s1 interface{}, s2 interface{}) int { return 0 } -// Iterate function +// StringIterateFunc defines the function type used when iterating a StringTree. type StringIterateFunc func(v string) bool -// Initialize or reset a StringTree +// Init will initialize or reset a StringTree. 
func (t *StringTree) Init(flags byte) *StringTree { t.Tree.Init(stringCompare, flags) return t } -// Return an initialized StringTree +// NewStringTree returns an initialized StringTree. func NewStringTree(flags byte) *StringTree { return new(StringTree).Init(flags) } -// At returns the value at the given index +// At returns the value at the given index. func (t *StringTree) At(index int) string { v := t.Tree.At(index) if v != nil { @@ -40,7 +40,7 @@ func (t *StringTree) At(index int) string { } // Find returns the element where the comparison function matches -// the node's value and the given key value +// the node's value and the given key value. func (t *StringTree) Find(key string) string { v := t.Tree.Find(key) if v != nil { @@ -62,7 +62,7 @@ func (t *StringTree) chanIterate(c chan<- string) { close(c) } -// Iter returns a channel you can read through to fetch all the items +// Iter returns a channel you can read through to fetch all the items. func (t *StringTree) Iter() <-chan string { c := make(chan string) go t.chanIterate(c) @@ -103,7 +103,7 @@ func (t *StringTree) Remove(ptr string) string { return "" } -// Remove removes the element at the given index +// RemoveAt removes the element at the given index. func (t *StringTree) RemoveAt(index int) string { v := t.Tree.RemoveAt(index) if v != nil { @@ -112,6 +112,7 @@ func (t *StringTree) RemoveAt(index int) string { return "" } +// Print prints the values of the StringTree to the given writer. func (t *StringTree) Print(w io.Writer, f StringIterateFunc, itemSiz int) { t.Tree.Print(w, func(v interface{}) bool { return f(v.(string)) }, itemSiz) } diff --git a/vendor/github.com/ancientlore/go-avltree/stringtree_test.go b/vendor/github.com/ancientlore/go-avltree/stringtree_test.go deleted file mode 100644 index aa8d41d2..00000000 --- a/vendor/github.com/ancientlore/go-avltree/stringtree_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package avltree - -import ( - "fmt" - "math/rand" - "testing" -) - -func TestStringTree(t *testing.T) { - - // no duplicates tests - - tree := NewStringTree(0) - - if tree.Tree.root != nil { - t.Errorf("Initialized tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != 0 { - t.Errorf("Initialized tree flags should be zero: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != stringCompare { - // t.Errorf("Initialized tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe := tree.Add("14") - - if v != "14" || dupe != false { - t.Errorf("Result of add should be 14/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0) != "14" { - t.Errorf("Single value should be 14: %v\n", tree.At(0)) - } - - v, dupe = tree.Add("14") - - if v != "14" || dupe != true { - t.Errorf("Result of add should be 14/true: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add("15") - - if v != "15" || dupe != false { - t.Errorf("Result of add should be 15/false: %v/%v\n", v, dupe) - } - - if tree.At(1) != "15" { - t.Errorf("Second value should be 15: %v\n", tree.At(0)) - } - - if tree.Len() != 2 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with two elements should be 2/3/2: %d Len, %d 
Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // reinitialize and allow duplicates - - tree.Init(AllowDuplicates) - - if tree.Tree.root != nil { - t.Errorf("Reinitialized tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != AllowDuplicates { - t.Errorf("Reinitialized tree flags should be AllowDuplicates: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != stringCompare { - // t.Errorf("Reinitialized tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe = tree.Add("14") - - if v != "14" || dupe != false { - t.Errorf("Result of add should be 14/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0) != "14" { - t.Errorf("Single value should be 14: %v\n", tree.At(0)) - } - - v, dupe = tree.Add("14") - - if v != "14" || dupe != false { - t.Errorf("Result of add should be 14/false: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add("15") - - if v != "15" || dupe != false { - t.Errorf("Result of add should be 15/false: %v/%v\n", v, dupe) - } - - if tree.At(2) != "15" { - t.Errorf("Third value should be 15: %v\n", tree.At(0)) - } - - if tree.Len() != 3 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with three elements should be 3/3/2: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // test Find - var st string - if st != "" { - t.Errorf("Init string != empty string\n", st) - } - - v = tree.Find("15") - - if v != "15" { - t.Errorf("Find should locate 15: %v\n", v) - } - - v = tree.Find("90") - - if v != "" { - t.Errorf("Found an item that isn't there: %v\n", v) - } - - // test Data (slice) - slice := tree.Data() - - if len(slice) != 3 || slice[2] != "15" { - t.Errorf("Slice is incorrect, expected 3/15: %v/%v\n", len(slice), slice[2]) - } - - // test Do - - x := "" - tree.Do(func(z string) bool { x += z; return true }) - - if x != "141415" { - t.Errorf("Do function did not concatvalues correctly, expected 141415: %d\n", x) - } - - // test Remove - - v, dupe = tree.Add("16") - - if v != "16" || dupe != false { - t.Errorf("Result of add should be 16/false: %v/%v\n", v, dupe) - } - - v = tree.Remove("14") - - if v != "14" || tree.Len() != 3 { - t.Errorf("Result of Remove should be 14/3: %v/%v\n", v, tree.Len()) - } - - // test Iter - - tree.Add("20") - tree.Add("19") - tree.Add("18") - tree.Add("17") - - x = "13" - for v := range tree.Iter() { - if v <= x { - t.Error("Iter expected", v, "to be >", x) - } - x = v - } - - if x != "20" { - t.Errorf("Iter ran wrong number of elements, expected last element of 20, got %d\n", x) - } - - // test clear - - tree.Clear() - - if tree.Tree.root != nil { - t.Errorf("Cleared tree root should be nil: %v\n", tree.Tree.root) - } - - if tree.Tree.treeFlags != AllowDuplicates { - t.Errorf("Cleared tree flags should still be AllowDuplicates: %v\n", tree.Tree.treeFlags) - } - - //if tree.Tree.compare != stringCompare { - // t.Errorf("Cleared tree compare function not correct: %v\n", tree.Tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Cleared tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - 
tree.Len(), tree.Cap(), tree.Height()) - } - - // one more test, larger data - - for j := 0; j < 100000; j++ { - tree.Add(fmt.Sprintf("%d", rand.Int())) - } - - if tree.Len() != 100000 { - t.Errorf("Expected 100000 elements, got %d\n", tree.Len()) - } - - var prev string - prev = "" - - // make sure elements sorted - tree.Do(func(elem string) bool { - var cur string - cur = elem - if prev > cur { - t.Errorf("Elements not in order, previous = %d, current = %d\n", prev, cur) - } - prev = cur - return true - }) - -} diff --git a/vendor/github.com/ancientlore/go-avltree/tree.go b/vendor/github.com/ancientlore/go-avltree/tree.go index c2dd5d1e..ff0ddacd 100644 --- a/vendor/github.com/ancientlore/go-avltree/tree.go +++ b/vendor/github.com/ancientlore/go-avltree/tree.go @@ -1,4 +1,7 @@ /* +Package avltree implements a height-balanced binary tree +with array-like indexing capability. + An AVL tree (Adel'son-Vel'skii & Landis) is a binary search tree in which the heights of the left and right subtrees of the root differ by at most one and in which the left @@ -34,13 +37,13 @@ const ( AllowDuplicates = 1 ) -// Definition of a comparison function +// CompareFunc defines the function type used to compare values. type CompareFunc func(v1 interface{}, v2 interface{}) int -// Iterate function +// IterateFunc defines the function type used for iterating a tree. type IterateFunc func(v interface{}) bool -// Tree object +// Tree stores data about the binary tree. type Tree struct { // root of the tree root *treeNode @@ -52,7 +55,7 @@ type Tree struct { treeFlags byte } -// Initialize or reset a Tree +// Init initializes or resets a Tree. func (t *Tree) Init(c CompareFunc, flags byte) *Tree { t.compare = c t.root = nil @@ -60,11 +63,11 @@ func (t *Tree) Init(c CompareFunc, flags byte) *Tree { return t } -// Return an initialized tree +// New returns an initialized tree. func New(c CompareFunc, flags byte) *Tree { return new(Tree).Init(c, flags) } // Clear removes all elements from the tree, keeping the -// current options and compare function +// current options and compare function. func (t *Tree) Clear() { t.Init(t.compare, t.treeFlags) } // calcHeightData contains information needed to compute the @@ -105,7 +108,7 @@ func (t *Tree) Height() int { return d.maxHeight } -// Len returns the number of elements in the tree +// Len returns the number of elements in the tree. func (t *Tree) Len() int { if t.root != nil { return t.root.size @@ -132,7 +135,7 @@ func (t *Tree) Cap() int { } // indexer recursively scans the tree to find the node -// at the given position +// at the given position. func indexer(node *treeNode, index int) *treeNode { if index < node.leftSize() { @@ -145,7 +148,7 @@ func indexer(node *treeNode, index int) *treeNode { return nil } -// At returns the value at the given index +// At returns the value at the given index. func (t *Tree) At(index int) interface{} { if t.root != nil && index < t.root.size && index >= 0 { @@ -158,14 +161,14 @@ func (t *Tree) At(index int) interface{} { return nil } -// findData is used when searching the tree +// findData is used when searching the tree. type findData struct { lookingFor interface{} compare CompareFunc } // finder recursively scans the tree to find the node with the -// value we're looking for +// value we're looking for. 
func (d *findData) finder(node *treeNode) *treeNode { if node != nil { @@ -181,7 +184,7 @@ func (d *findData) finder(node *treeNode) *treeNode { } // Find returns the element where the comparison function matches -// the node's value and the given key value +// the node's value and the given key value. func (t *Tree) Find(key interface{}) interface{} { if key != nil && t.root != nil { d := &findData{key, t.compare} @@ -194,13 +197,13 @@ func (t *Tree) Find(key interface{}) interface{} { return nil } -// iterData is used when iterating the tree +// iterData is used when iterating the tree. type iterData struct { iter IterateFunc } // iterate recursively traverses the tree and executes -// the iteration function +// the iteration function. func (d *iterData) iterate(node *treeNode) bool { var proceed bool @@ -243,7 +246,7 @@ func (t *Tree) chanIterate(c chan<- interface{}) { close(c) } -// Iter returns a channel you can read through to fetch all the items +// Iter returns a channel you can read through to fetch all the items. func (t *Tree) Iter() <-chan interface{} { c := make(chan interface{}) go t.chanIterate(c) diff --git a/vendor/github.com/ancientlore/go-avltree/tree_test.go b/vendor/github.com/ancientlore/go-avltree/tree_test.go deleted file mode 100644 index 2495645d..00000000 --- a/vendor/github.com/ancientlore/go-avltree/tree_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package avltree - -import ( - "math/rand" - "testing" -) - -func compareInt(a interface{}, b interface{}) int { - if a.(int) < b.(int) { - return -1 - } else if a.(int) > b.(int) { - return 1 - } - return 0 -} - -func TestTree(t *testing.T) { - - // no duplicates tests - - tree := New(compareInt, 0) - - if tree.root != nil { - t.Errorf("Initialized tree root should be nil: %v\n", tree.root) - } - - if tree.treeFlags != 0 { - t.Errorf("Initialized tree flags should be zero: %v\n", tree.treeFlags) - } - - //if tree.compare != compareInt { - // t.Errorf("Initialized tree compare function not correct: %v\n", tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe := tree.Add(14) - - if v.(int) != 14 || dupe != false { - t.Errorf("Result of add should be 14/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0).(int) != 14 { - t.Errorf("Single value should be 14: %v\n", tree.At(0)) - } - - v, dupe = tree.Add(14) - - if v.(int) != 14 || dupe != true { - t.Errorf("Result of add should be 14/true: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add(15) - - if v.(int) != 15 || dupe != false { - t.Errorf("Result of add should be 15/false: %v/%v\n", v, dupe) - } - - if tree.At(1).(int) != 15 { - t.Errorf("Second value should be 15: %v\n", tree.At(0)) - } - - if tree.Len() != 2 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with two elements should be 2/3/2: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // reinitialize and allow duplicates - - tree.Init(compareInt, AllowDuplicates) - - if tree.root != nil { - t.Errorf("Reinitialized tree root should be nil: %v\n", tree.root) - } - - if tree.treeFlags != AllowDuplicates { - t.Errorf("Reinitialized tree flags should be AllowDuplicates: %v\n", tree.treeFlags) - } - - //if 
tree.compare != compareInt { - // t.Errorf("Reinitialized tree compare function not correct: %v\n", tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Initialized tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - v, dupe = tree.Add(14) - - if v.(int) != 14 || dupe != false { - t.Errorf("Result of add should be 14/false: %v/%v\n", v, dupe) - } - - if tree.Len() != 1 || tree.Cap() != 1 || tree.Height() != 1 { - t.Errorf("Tree sizes with one element should all be one: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - if tree.At(0).(int) != 14 { - t.Errorf("Single value should be 14: %v\n", tree.At(0)) - } - - v, dupe = tree.Add(14) - - if v.(int) != 14 || dupe != false { - t.Errorf("Result of add should be 14/false: %v/%v\n", v, dupe) - } - - v, dupe = tree.Add(15) - - if v.(int) != 15 || dupe != false { - t.Errorf("Result of add should be 15/false: %v/%v\n", v, dupe) - } - - if tree.At(2).(int) != 15 { - t.Errorf("Third value should be 15: %v\n", tree.At(0)) - } - - if tree.Len() != 3 || tree.Cap() != 3 || tree.Height() != 2 { - t.Errorf("Tree sizes with three elements should be 3/3/2: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // test Find - - v = tree.Find(15) - - if v.(int) != 15 { - t.Errorf("Find should locate 15: %v\n", v) - } - - v = tree.Find(90) - - if v != nil { - t.Errorf("Found an item that isn't there: %v\n", v) - } - - v = tree.Find(nil) - if v != nil { - t.Errorf("Found an item that is not there: %v\n", v) - } - - // test Data (slice) - slice := tree.Data() - - if len(slice) != 3 || slice[2].(int) != 15 { - t.Errorf("Slice is incorrect, expected 3/15: %v/%v\n", len(slice), slice[2].(int)) - } - - // test Do - - x := 0 - tree.Do(func(z interface{}) bool { x += z.(int); return true }) - - if x != 43 { - t.Errorf("Do function did not add up values correctly, expected 43: %d\n", x) - } - - // test Remove - - v, dupe = tree.Add(16) - - if v.(int) != 16 || dupe != false { - t.Errorf("Result of add should be 16/false: %v/%v\n", v, dupe) - } - - v = tree.Remove(14) - - if v.(int) != 14 || tree.Len() != 3 { - t.Errorf("Result of Remove should be 14/3: %v/%v\n", v, tree.Len()) - } - - // test Iter - - tree.Add(20) - tree.Add(19) - tree.Add(18) - tree.Add(17) - - x = 14 - for v := range tree.Iter() { - if v.(int) != x { - t.Error("Iter expected", x, "got", v.(int)) - } - x++ - } - - if x != 21 { - t.Errorf("Iter ran wrong number of elements, expected 7, got %d\n", x-14) - } - - // test clear - - tree.Clear() - - if tree.root != nil { - t.Errorf("Cleared tree root should be nil: %v\n", tree.root) - } - - if tree.treeFlags != AllowDuplicates { - t.Errorf("Cleared tree flags should still be AllowDuplicates: %v\n", tree.treeFlags) - } - - //if tree.compare != compareInt { - // t.Errorf("Cleared tree compare function not correct: %v\n", tree.compare) - //} - - if tree.Len() != 0 || tree.Cap() != 0 || tree.Height() != 0 { - t.Errorf("Cleared tree sizes should all be zero: %d Len, %d Cap, %d Height\n", - tree.Len(), tree.Cap(), tree.Height()) - } - - // one more test, larger data - - for j := 0; j < 100000; j++ { - tree.Add(rand.Int()) - } - - if tree.Len() != 100000 { - t.Errorf("Expected 100000 elements, got %d\n", tree.Len()) - } - - var prev int - prev = -1 - - // make sure elements sorted - tree.Do(func(elem interface{}) bool { - var cur int - cur = elem.(int) - if prev > cur { - t.Errorf("Elements not in order, previous 
= %d, current = %d\n", prev, cur) - } - prev = cur - return true - }) - -} diff --git a/vendor/github.com/ancientlore/go-avltree/treeprint.go b/vendor/github.com/ancientlore/go-avltree/treeprint.go index 86b65f49..f5ea9f4f 100644 --- a/vendor/github.com/ancientlore/go-avltree/treeprint.go +++ b/vendor/github.com/ancientlore/go-avltree/treeprint.go @@ -69,6 +69,7 @@ func (d *printData) printer(node *treeNode) { d.lineDepth -= (d.itemSize + 5) } +// Print prints the values of the Tree to the given writer. func (t *Tree) Print(w io.Writer, f IterateFunc, itemSiz int) { fmt.Fprintf(w, "treeNode-+-Left \t / Left High\n") diff --git a/vendor/github.com/ancientlore/go-avltree/treeremove.go b/vendor/github.com/ancientlore/go-avltree/treeremove.go index d0b276bf..8ec96b7d 100644 --- a/vendor/github.com/ancientlore/go-avltree/treeremove.go +++ b/vendor/github.com/ancientlore/go-avltree/treeremove.go @@ -248,7 +248,7 @@ func remove(node **treeNode, index int, shorter *bool) interface{} { return ptr } -// Remove removes the element at the given index +// RemoveAt removes the element at the given index. func (t *Tree) RemoveAt(index int) interface{} { if t.root != nil && index < t.root.size && index >= 0 { var shorter bool diff --git a/vendor/github.com/codegangsta/negroni/logger_test.go b/vendor/github.com/codegangsta/negroni/logger_test.go deleted file mode 100644 index 880337d1..00000000 --- a/vendor/github.com/codegangsta/negroni/logger_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package negroni - -import ( - "bytes" - "log" - "net/http" - "net/http/httptest" - "testing" -) - -func Test_Logger(t *testing.T) { - buff := bytes.NewBufferString("") - recorder := httptest.NewRecorder() - - l := NewLogger() - l.Logger = log.New(buff, "[negroni] ", 0) - - n := New() - // replace log for testing - n.Use(l) - n.UseHandler(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusNotFound) - })) - - req, err := http.NewRequest("GET", "http://localhost:3000/foobar", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(recorder, req) - expect(t, recorder.Code, http.StatusNotFound) - refute(t, len(buff.String()), 0) -} diff --git a/vendor/github.com/codegangsta/negroni/negroni_test.go b/vendor/github.com/codegangsta/negroni/negroni_test.go deleted file mode 100644 index 02dd6ed0..00000000 --- a/vendor/github.com/codegangsta/negroni/negroni_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package negroni - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" -) - -/* Test Helpers */ -func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { - t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { - t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func TestNegroniRun(t *testing.T) { - // just test that Run doesn't bomb - go New().Run(":3000") -} - -func TestNegroniServeHTTP(t *testing.T) { - result := "" - response := httptest.NewRecorder() - - n := New() - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - result += "foo" - next(rw, r) - result += "ban" - })) - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - result += "bar" - next(rw, r) - result += "baz" - })) - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - result += "bat" - 
rw.WriteHeader(http.StatusBadRequest) - })) - - n.ServeHTTP(response, (*http.Request)(nil)) - - expect(t, result, "foobarbatbazban") - expect(t, response.Code, http.StatusBadRequest) -} - -// Ensures that a Negroni middleware chain -// can correctly return all of its handlers. -func TestHandlers(t *testing.T) { - response := httptest.NewRecorder() - n := New() - handlers := n.Handlers() - expect(t, 0, len(handlers)) - - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - rw.WriteHeader(http.StatusOK) - })) - - // Expects the length of handlers to be exactly 1 - // after adding exactly one handler to the middleware chain - handlers = n.Handlers() - expect(t, 1, len(handlers)) - - // Ensures that the first handler that is in sequence behaves - // exactly the same as the one that was registered earlier - handlers[0].ServeHTTP(response, (*http.Request)(nil), nil) - expect(t, response.Code, http.StatusOK) -} - -func TestNegroni_Use_Nil(t *testing.T) { - defer func() { - err := recover() - if err == nil { - t.Errorf("Expected negroni.Use(nil) to panic, but it did not") - } - }() - - n := New() - n.Use(nil) -} diff --git a/vendor/github.com/codegangsta/negroni/recovery_test.go b/vendor/github.com/codegangsta/negroni/recovery_test.go deleted file mode 100644 index 3fa264ac..00000000 --- a/vendor/github.com/codegangsta/negroni/recovery_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package negroni - -import ( - "bytes" - "log" - "net/http" - "net/http/httptest" - "testing" -) - -func TestRecovery(t *testing.T) { - buff := bytes.NewBufferString("") - recorder := httptest.NewRecorder() - - rec := NewRecovery() - rec.Logger = log.New(buff, "[negroni] ", 0) - - n := New() - // replace log for testing - n.Use(rec) - n.UseHandler(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { - panic("here is a panic!") - })) - n.ServeHTTP(recorder, (*http.Request)(nil)) - expect(t, recorder.Code, http.StatusInternalServerError) - refute(t, recorder.Body.Len(), 0) - refute(t, len(buff.String()), 0) -} diff --git a/vendor/github.com/codegangsta/negroni/response_writer_test.go b/vendor/github.com/codegangsta/negroni/response_writer_test.go deleted file mode 100644 index ed1ee70a..00000000 --- a/vendor/github.com/codegangsta/negroni/response_writer_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package negroni - -import ( - "bufio" - "net" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -type closeNotifyingRecorder struct { - *httptest.ResponseRecorder - closed chan bool -} - -func newCloseNotifyingRecorder() *closeNotifyingRecorder { - return &closeNotifyingRecorder{ - httptest.NewRecorder(), - make(chan bool, 1), - } -} - -func (c *closeNotifyingRecorder) close() { - c.closed <- true -} - -func (c *closeNotifyingRecorder) CloseNotify() <-chan bool { - return c.closed -} - -type hijackableResponse struct { - Hijacked bool -} - -func newHijackableResponse() *hijackableResponse { - return &hijackableResponse{} -} - -func (h *hijackableResponse) Header() http.Header { return nil } -func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil } -func (h *hijackableResponse) WriteHeader(code int) {} -func (h *hijackableResponse) Flush() {} -func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h.Hijacked = true - return nil, nil, nil -} - -func TestResponseWriterWritingString(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - rw.Write([]byte("Hello world")) - - expect(t, rec.Code, rw.Status()) - 
expect(t, rec.Body.String(), "Hello world") - expect(t, rw.Status(), http.StatusOK) - expect(t, rw.Size(), 11) - expect(t, rw.Written(), true) -} - -func TestResponseWriterWritingStrings(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - rw.Write([]byte("Hello world")) - rw.Write([]byte("foo bar bat baz")) - - expect(t, rec.Code, rw.Status()) - expect(t, rec.Body.String(), "Hello worldfoo bar bat baz") - expect(t, rw.Status(), http.StatusOK) - expect(t, rw.Size(), 26) -} - -func TestResponseWriterWritingHeader(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - rw.WriteHeader(http.StatusNotFound) - - expect(t, rec.Code, rw.Status()) - expect(t, rec.Body.String(), "") - expect(t, rw.Status(), http.StatusNotFound) - expect(t, rw.Size(), 0) -} - -func TestResponseWriterBefore(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - result := "" - - rw.Before(func(ResponseWriter) { - result += "foo" - }) - rw.Before(func(ResponseWriter) { - result += "bar" - }) - - rw.WriteHeader(http.StatusNotFound) - - expect(t, rec.Code, rw.Status()) - expect(t, rec.Body.String(), "") - expect(t, rw.Status(), http.StatusNotFound) - expect(t, rw.Size(), 0) - expect(t, result, "barfoo") -} - -func TestResponseWriterHijack(t *testing.T) { - hijackable := newHijackableResponse() - rw := NewResponseWriter(hijackable) - hijacker, ok := rw.(http.Hijacker) - expect(t, ok, true) - _, _, err := hijacker.Hijack() - if err != nil { - t.Error(err) - } - expect(t, hijackable.Hijacked, true) -} - -func TestResponseWriteHijackNotOK(t *testing.T) { - hijackable := new(http.ResponseWriter) - rw := NewResponseWriter(*hijackable) - hijacker, ok := rw.(http.Hijacker) - expect(t, ok, true) - _, _, err := hijacker.Hijack() - - refute(t, err, nil) -} - -func TestResponseWriterCloseNotify(t *testing.T) { - rec := newCloseNotifyingRecorder() - rw := NewResponseWriter(rec) - closed := false - notifier := rw.(http.CloseNotifier).CloseNotify() - rec.close() - select { - case <-notifier: - closed = true - case <-time.After(time.Second): - } - expect(t, closed, true) -} - -func TestResponseWriterFlusher(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - _, ok := rw.(http.Flusher) - expect(t, ok, true) -} diff --git a/vendor/github.com/codegangsta/negroni/static_test.go b/vendor/github.com/codegangsta/negroni/static_test.go deleted file mode 100644 index 637cfcd6..00000000 --- a/vendor/github.com/codegangsta/negroni/static_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package negroni - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" -) - -func TestStatic(t *testing.T) { - response := httptest.NewRecorder() - response.Body = new(bytes.Buffer) - - n := New() - n.Use(NewStatic(http.Dir("."))) - - req, err := http.NewRequest("GET", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) - expect(t, response.Header().Get("Expires"), "") - if response.Body.Len() == 0 { - t.Errorf("Got empty body for GET request") - } -} - -func TestStaticHead(t *testing.T) { - response := httptest.NewRecorder() - response.Body = new(bytes.Buffer) - - n := New() - n.Use(NewStatic(http.Dir("."))) - n.UseHandler(http.NotFoundHandler()) - - req, err := http.NewRequest("HEAD", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) - if 
response.Body.Len() != 0 { - t.Errorf("Got non-empty body for HEAD request") - } -} - -func TestStaticAsPost(t *testing.T) { - response := httptest.NewRecorder() - - n := New() - n.Use(NewStatic(http.Dir("."))) - n.UseHandler(http.NotFoundHandler()) - - req, err := http.NewRequest("POST", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusNotFound) -} - -func TestStaticBadDir(t *testing.T) { - response := httptest.NewRecorder() - - n := Classic() - n.UseHandler(http.NotFoundHandler()) - - req, err := http.NewRequest("GET", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - refute(t, response.Code, http.StatusOK) -} - -func TestStaticOptionsServeIndex(t *testing.T) { - response := httptest.NewRecorder() - - n := New() - s := NewStatic(http.Dir(".")) - s.IndexFile = "negroni.go" - n.Use(s) - - req, err := http.NewRequest("GET", "http://localhost:3000/", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) -} - -func TestStaticOptionsPrefix(t *testing.T) { - response := httptest.NewRecorder() - - n := New() - s := NewStatic(http.Dir(".")) - s.Prefix = "/public" - n.Use(s) - - // Check file content behaviour - req, err := http.NewRequest("GET", "http://localhost:3000/public/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) -} diff --git a/vendor/github.com/codegangsta/negroni/translations/README_de_de.md b/vendor/github.com/codegangsta/negroni/translations/README_de_de.md deleted file mode 100644 index 237111c9..00000000 --- a/vendor/github.com/codegangsta/negroni/translations/README_de_de.md +++ /dev/null @@ -1,177 +0,0 @@ -# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) - -Negroni ist ein Ansatz für eine idiomatische Middleware in Go. Sie ist klein, nicht-intrusiv und unterstützt die Nutzung von `net/http` Handlern. - -Wenn Dir die Idee hinter [Martini](http://github.com/go-martini/martini) gefällt, aber Du denkst, es stecke zu viel Magie darin, dann ist Negroni eine passende Alternative. - -## Wo fange ich an? - -Nachdem Du Go installiert und den [GOPATH](http://golang.org/doc/code.html#GOPATH) eingerichtet hast, erstelle eine `.go`-Datei. Nennen wir sie `server.go`. - -~~~ go -package main - -import ( - "github.com/codegangsta/negroni" - "net/http" - "fmt" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Willkommen auf der Homepage!") - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -Installiere nun das Negroni Package (**go 1.1** und höher werden vorausgesetzt): -~~~ -go get github.com/codegangsta/negroni -~~~ - -Dann starte Deinen Server: -~~~ -go run server.go -~~~ - -Nun läuft ein `net/http`-Webserver von Go unter `localhost:3000`. - -## Hilfe benötigt? -Wenn Du eine Frage hast oder Dir ein bestimmte Funktion wünscht, nutze die [Mailing Liste](https://groups.google.com/forum/#!forum/negroni-users). Issues auf Github werden ausschließlich für Bug Reports und Pull Requests genutzt. - -## Ist Negroni ein Framework? 
-Negroni ist **kein** Framework. Es ist eine Bibliothek, geschaffen, um kompatibel mit `net/http` zu sein. - -## Routing? -Negroni ist BYOR (Bring your own Router - Nutze Deinen eigenen Router). Die Go-Community verfügt bereits über eine Vielzahl von großartigen Routern. Negroni versucht möglichst alle zu unterstützen, indem es `net/http` vollständig unterstützt. Beispielsweise sieht eine Implementation mit [Gorilla Mux](http://github.com/gorilla/mux) folgendermaßen aus: - -~~~ go -router := mux.NewRouter() -router.HandleFunc("/", HomeHandler) - -n := negroni.New(Middleware1, Middleware2) -// Oder nutze eine Middleware mit der Use()-Funktion -n.Use(Middleware3) -// Der Router kommt als letztes -n.UseHandler(router) - -n.Run(":3000") -~~~ - -## `negroni.Classic()` -`negroni.Classic()` stellt einige Standard-Middlewares bereit, die für die meisten Anwendungen von Nutzen ist: - -* `negroni.Recovery` - Middleware für Panic Recovery . -* `negroni.Logging` - Anfrage/Rückmeldungs-Logging-Middleware. -* `negroni.Static` - Ausliefern von statischen Dateien unter dem "public" Verzeichnis. - -Dies macht es wirklich einfach, mit den nützlichen Funktionen von Negroni zu starten. - -## Handlers -Negroni stellt einen bidirektionalen Middleware-Flow bereit. Dies wird durch das `negroni.Handler`-Interface erreicht: - -~~~ go -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} -~~~ - -Wenn eine Middleware nicht bereits den ResponseWriter genutzt hat, sollte sie die nächste `http.HandlerFunc` in der Verkettung von Middlewares aufrufen und diese ausführen. Das kann von großem Nutzen sein: - -~~~ go -func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // Mache etwas vor dem Aufruf - next(rw, r) - // Mache etwas nach dem Aufruf -} -~~~ - -Und Du kannst eine Middleware durch die `Use`-Funktion der Verkettung von Middlewares zuordnen. - -~~~ go -n := negroni.New() -n.Use(negroni.HandlerFunc(MyMiddleware)) -~~~ - -Stattdessen kannst Du auch herkömmliche `http.Handler` zuordnen: - -~~~ go -n := negroni.New() - -mux := http.NewServeMux() -// Ordne Deine Routen zu - -n.UseHandler(mux) - -n.Run(":3000") -~~~ - -## `Run()` -Negroni hat eine nützliche Funktion namens `Run`. `Run` übernimmt eine Zeichenkette `addr` ähnlich wie [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe). - -~~~ go -n := negroni.Classic() -// ... -log.Fatal(http.ListenAndServe(":8080", n)) -~~~ - -## Routenspezifische Middleware -Wenn Du eine Gruppe von Routen hast, welche alle die gleiche Middleware ausführen müssen, kannst Du einfach eine neue Negroni-Instanz erstellen und sie als Route-Handler nutzen: - -~~~ go -router := mux.NewRouter() -adminRoutes := mux.NewRouter() -// Füge die Admin-Routen hier hinzu - -// Erstelle eine neue Negroni-Instanz für die Admin-Middleware -router.Handle("/admin", negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(adminRoutes), -)) -~~~ - -## Middlewares von Dritten - -Hier ist eine aktuelle Liste von Middlewares, die kompatible mit Negroni sind. 
Tue Dir keinen Zwang an, Dich einzutragen, wenn Du selbst eine Middleware programmiert hast: - - -| Middleware | Autor | Beschreibung | -| -----------|--------|-------------| -| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | Sichere Authentifikation für Endpunkte einer REST API | -| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | -| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Eine Middleware mit ein paar nützlichen Sicherheitseinstellungen | -| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | Eine Middleware die nach JWTs im `Authorization`-Feld des Header sucht und sie dekodiert.| -| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Data Binding von HTTP-Anfragen in Structs | -| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-basierender Logger | -| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Rendere JSON, XML und HTML Vorlagen | -| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic Agent für die Go-Echtzeitumgebung | -| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | Kompression von HTTP-Rückmeldungen via GZIP | -| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2 Middleware | -| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Session Management | -| [permissions2](https://github.com/xyproto/permissions2) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, Benutzer und Berechtigungen | -| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Generiere TinySVG, HTML und CSS spontan | -| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) Unterstützung | -| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea Franz](https://github.com/pilu) | Eine Middleware die zufällige X-Request-Id-Header jedem Request anfügt | -| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) HMAC-basierte Middleware zur Authentifikation | -| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | Speichere wichtige Informationen über Deine Webanwendung (Reaktionszeit, etc.) | - -## Beispiele -[Alexander Rødseth](https://github.com/xyproto) programmierte [mooseware](https://github.com/xyproto/mooseware), ein Grundgerüst zum Erstellen von Negroni Middleware-Handerln. - -## Aktualisieren in Echtzeit? -[gin](https://github.com/codegangsta/gin) und [fresh](https://github.com/pilu/fresh) aktualisieren Deine Negroni-Anwendung automatisch. 
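The bidirectional middleware flow documented in the README above is easiest to see in one complete program. Below is a minimal, illustrative sketch that assumes only the API the README itself shows (`negroni.New`, `negroni.HandlerFunc`, `Use`, `UseHandler`, `Run`); the `timer` middleware and the route are made-up names for the example, not part of the library.

~~~ go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/codegangsta/negroni"
)

// timer is a "bidirectional" middleware in the sense described above:
// it does work before and after the rest of the chain by calling next().
func timer(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	start := time.Now()
	next(rw, r) // hand off to the remaining middleware and the final handler
	log.Printf("%s %s took %v", r.Method, r.URL.Path, time.Since(start))
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(rw, "hello")
	})

	n := negroni.New()                // empty chain; negroni.Classic() would add logging, recovery and static files
	n.Use(negroni.HandlerFunc(timer)) // wrap the plain function so it satisfies negroni.Handler
	n.UseHandler(mux)                 // the router always goes last
	n.Run(":3000")
}
~~~

Everything before `next(rw, r)` runs on the way in, and everything after it runs on the way out once the rest of the chain has finished; that before/after split around `next` is all the "bidirectional" flow amounts to.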
- -## Unverzichbare Informationen für Go- & Negronineulinge - -* [Nutze einen Kontext zum Ãœbertragen von Middlewareinformationen an Handler (Englisch)](http://elithrar.github.io/article/map-string-interface/) -* [Middlewares verstehen (Englisch)](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) - -## Ãœber das Projekt - -Negroni wurde obsseziv von Niemand gerigeren als dem [Code Gangsta](http://codegangsta.io/) entwickelt. diff --git a/vendor/github.com/codegangsta/negroni/translations/README_pt_br.md b/vendor/github.com/codegangsta/negroni/translations/README_pt_br.md deleted file mode 100644 index d5b02fa3..00000000 --- a/vendor/github.com/codegangsta/negroni/translations/README_pt_br.md +++ /dev/null @@ -1,170 +0,0 @@ -# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) - -Negroni é uma abordagem idiomática para middleware web em Go. É pequeno, não intrusivo, e incentiva uso da biblioteca `net/http`. - -Se gosta da idéia do [Martini](http://github.com/go-martini/martini), mas acha que contém muita mágica, então Negroni é ideal. - -## Começando - -Depois de instalar Go e definir seu [GOPATH](http://golang.org/doc/code.html#GOPATH), criar seu primeirto arquivo `.go`. Iremos chamá-lo `server.go`. - -~~~ go -package main - -import ( - "github.com/codegangsta/negroni" - "net/http" - "fmt" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -Depois instale o pacote Negroni (**go 1.1** ou superior) -~~~ -go get github.com/codegangsta/negroni -~~~ - -Depois execute seu servidor: -~~~ -go run server.go -~~~ - -Agora terá um servidor web Go net/http rodando em `localhost:3000`. - -## Precisa de Ajuda? -Se você tem uma pergunta ou pedido de recurso,[go ask the mailing list](https://groups.google.com/forum/#!forum/negroni-users). O Github issues para o Negroni será usado exclusivamente para Reportar bugs e pull requests. - -## Negroni é um Framework? -Negroni **não** é a framework. É uma biblioteca que é desenhada para trabalhar diretamente com net/http. - -## Roteamento? -Negroni é TSPR(Traga seu próprio Roteamento). A comunidade Go já tem um grande número de roteadores http disponíveis, Negroni tenta rodar bem com todos eles pelo suporte total `net/http`/ Por exemplo, a integração com [Gorilla Mux](http://github.com/gorilla/mux) se parece com isso: - -~~~ go -router := mux.NewRouter() -router.HandleFunc("/", HomeHandler) - -n := negroni.New(Middleware1, Middleware2) -// Or use a middleware with the Use() function -n.Use(Middleware3) -// router goes last -n.UseHandler(router) - -n.Run(":3000") -~~~ - -## `negroni.Classic()` -`negroni.Classic()` fornece alguns middlewares padrão que são úteis para maioria das aplicações: - -* `negroni.Recovery` - Panic Recovery Middleware. -* `negroni.Logging` - Request/Response Logging Middleware. -* `negroni.Static` - Static File serving under the "public" directory. - -Isso torna muito fácil começar com alguns recursos úteis do Negroni. - -## Handlers -Negroni fornece um middleware de fluxo bidirecional. 
Isso é feito através da interface `negroni.Handler`: - -~~~ go -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} -~~~ - -Se um middleware não tenha escrito o ResponseWriter, ele deve chamar a próxima `http.HandlerFunc` na cadeia para produzir o próximo handler middleware. Isso pode ser usado muito bem: - -~~~ go -func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // do some stuff before - next(rw, r) - // do some stuff after -} -~~~ - -E pode mapear isso para a cadeia de handler com a função `Use`: - -~~~ go -n := negroni.New() -n.Use(negroni.HandlerFunc(MyMiddleware)) -~~~ - -Você também pode mapear `http.Handler` antigos: - -~~~ go -n := negroni.New() - -mux := http.NewServeMux() -// map your routes - -n.UseHandler(mux) - -n.Run(":3000") -~~~ - -## `Run()` -Negroni tem uma função de conveniência chamada `Run`. `Run` pega um endereço de string idêntico para [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe). - -~~~ go -n := negroni.Classic() -// ... -log.Fatal(http.ListenAndServe(":8080", n)) -~~~ - -## Middleware para Rotas Específicas -Se você tem um grupo de rota com rotas que precisam ser executadas por um middleware específico, pode simplesmente criar uma nova instância de Negroni e usar no seu Manipulador de rota. - -~~~ go -router := mux.NewRouter() -adminRoutes := mux.NewRouter() -// add admin routes here - -// Criar um middleware negroni para admin -router.Handle("/admin", negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(adminRoutes), -)) -~~~ - -## Middleware de Terceiros - -Aqui está uma lista atual de Middleware Compatíveis com Negroni. Sinta se livre para mandar um PR vinculando seu middleware se construiu um: - - -| Middleware | Autor | Descrição | -| -----------|--------|-------------| -| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | -| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Implementa rapidamente itens de segurança.| -| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Handler para mapeamento/validação de um request a estrutura. | -| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger | -| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Pacote para renderizar JSON, XML, e templates HTML. | -| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime | -| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | Handler para adicionar compreção gzip para as requisições | -| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | Handler que prove sistema de login OAuth 2.0 para aplicações Martini. Google Sign-in, Facebook Connect e Github login são suportados. | -| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Handler que provê o serviço de sessão. | -| [permissions](https://github.com/xyproto/permissions) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, usuários e permissões. | -| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Pacote para gerar TinySVG, HTML e CSS em tempo real. 
| - -## Exemplos -[Alexander Rødseth](https://github.com/xyproto) criou [mooseware](https://github.com/xyproto/mooseware), uma estrutura para escrever um handler middleware Negroni. - -## Servidor com autoreload? -[gin](https://github.com/codegangsta/gin) e [fresh](https://github.com/pilu/fresh) são aplicativos para autoreload do Negroni. - -## Leitura Essencial para Iniciantes em Go & Negroni -* [Usando um contexto para passar informação de um middleware para o manipulador final](http://elithrar.github.io/article/map-string-interface/) -* [Entendendo middleware](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) - - -## Sobre -Negroni é obsessivamente desenhado por ninguém menos que [Code Gangsta](http://codegangsta.io/) diff --git a/vendor/github.com/codegangsta/negroni/translations/README_zh_cn.md b/vendor/github.com/codegangsta/negroni/translations/README_zh_cn.md deleted file mode 100644 index 16217a30..00000000 --- a/vendor/github.com/codegangsta/negroni/translations/README_zh_cn.md +++ /dev/null @@ -1,182 +0,0 @@ -# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) - -在Go语言里,Negroni 是一个很地é“çš„ web 中间件,它是微型,éžåµŒå…¥å¼ï¼Œå¹¶é¼“励使用原生 `net/http` 处ç†å™¨çš„库。 - -如果你用过并喜欢 [Martini](http://github.com/go-martini/martini) 框架,但åˆä¸æƒ³æ¡†æž¶ä¸­æœ‰å¤ªå¤šé­”幻性的特å¾ï¼Œé‚£ Negroni 就是你的èœäº†ï¼Œç›¸ä¿¡å®ƒéžå¸¸é€‚åˆä½ ã€‚ - - -语言翻译: -* [Português Brasileiro (pt_BR)](translations/README_pt_br.md) -* [简体中文 (zh_CN)](translations/README_zh_cn.md) - -## 入门指导 - -当安装了 Go 语言并设置好了 [GOPATH](http://golang.org/doc/code.html#GOPATH) åŽï¼Œæ–°å»ºä½ ç¬¬ä¸€ä¸ª`.go` 文件,我们å«å®ƒ `server.go` å§ã€‚ - -~~~ go -package main - -import ( - "github.com/codegangsta/negroni" - "net/http" - "fmt" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -然åŽå®‰è£… Negroni 包(它ä¾èµ– **Go 1.1** 或更高的版本): -~~~ -go get github.com/codegangsta/negroni -~~~ - -然åŽè¿è¡Œåˆšå»ºå¥½çš„ server.go 文件: -~~~ -go run server.go -~~~ - -这时一个 Go `net/http` Web æœåŠ¡å™¨å°±è·‘在 `localhost:3000` 上,使用æµè§ˆå™¨æ‰“å¼€ `localhost:3000` å¯ä»¥çœ‹åˆ°è¾“出结果。 - -## 需è¦ä½ çš„贡献 -如果你有问题或新想法,请到[邮件群组](https://groups.google.com/forum/#!forum/negroni-users)里å馈,GitHub issues 是专门给æ交 bug 报告和 pull 请求用途的,欢迎你的å‚与。 - -## Negroni 是一个框架å—? -Negroni **ä¸**是一个框架,它是为了方便使用 `net/http` 而设计的一个库而已。 - -## 路由呢? -Negroni 没有带路由功能,使用 Negroni 时,需è¦æ‰¾ä¸€ä¸ªé€‚åˆä½ çš„路由。ä¸è¿‡å¥½åœ¨ Go 社区里已ç»æœ‰ç›¸å½“多å¯ç”¨çš„路由,Negroni æ›´å–œæ¬¢å’Œé‚£äº›å®Œå…¨æ”¯æŒ `net/http` 库的路由组åˆä½¿ç”¨ï¼Œæ¯”å¦‚ï¼Œç»“åˆ [Gorilla Mux](http://github.com/gorilla/mux) 使用åƒè¿™æ ·ï¼š - -~~~ go -router := mux.NewRouter() -router.HandleFunc("/", HomeHandler) - -n := negroni.New(Middleware1, Middleware2) -// Or use a middleware with the Use() function -n.Use(Middleware3) -// router goes last -n.UseHandler(router) - -n.Run(":3000") -~~~ - -## `negroni.Classic()` ç»å…¸å®žä¾‹ -`negroni.Classic()` æ供一些默认的中间件,这些中间件在多数应用都很有用。 - -* `negroni.Recovery` - 异常(æ慌)æ¢å¤ä¸­é—´ä»¶ -* `negroni.Logging` - 请求 / å“应 log 日志中间件 -* `negroni.Static` - é™æ€æ–‡ä»¶å¤„ç†ä¸­é—´ä»¶ï¼Œé»˜è®¤ç›®å½•åœ¨ "public" 下. 
- -`negroni.Classic()` 让你一开始就éžå¸¸å®¹æ˜“上手 Negroni ,并使用它那些通用的功能。 - -## Handlers (处ç†å™¨) -Negroni æä¾›åŒå‘的中间件机制,这个特å¾å¾ˆæ£’,都是得益于 `negroni.Handler` 这个接å£ã€‚ - -~~~ go -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} -~~~ - -如果一个中间件没有写入 ResponseWriter å“应,它会在中间件链里调用下一个 `http.HandlerFunc` 执行下去, 它å¯ä»¥è¿™ä¹ˆä¼˜é›…的使用。如下: - -~~~ go -func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // do some stuff before - next(rw, r) - // do some stuff after -} -~~~ - -你也å¯ä»¥ç”¨ `Use` 函数把这些 `http.Handler` 处ç†å™¨å¼•è¿›åˆ°å¤„ç†å™¨é“¾ä¸Šæ¥ï¼š - -~~~ go -n := negroni.New() -n.Use(negroni.HandlerFunc(MyMiddleware)) -~~~ - -你还å¯ä»¥ä½¿ç”¨ `http.Handler`(s) 把 `http.Handler` 处ç†å™¨å¼•è¿›æ¥ã€‚ - -~~~ go -n := negroni.New() - -mux := http.NewServeMux() -// map your routes - -n.UseHandler(mux) - -n.Run(":3000") -~~~ - -## `Run()` -Negroni æä¾›ä¸€ä¸ªå¾ˆå¥½ç”¨çš„å‡½æ•°å« `Run` ,把地å€å­—符串传人该函数,å³å¯å®žçŽ°å¾ˆåœ°é“çš„ [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe) 函数功能了。 - -~~~ go -n := negroni.Classic() -// ... -log.Fatal(http.ListenAndServe(":8080", n)) -~~~ - -## 特定路由中间件 -如果你需è¦ç¾¤ç»„路由功能,需è¦å€ŸåŠ©ç‰¹å®šçš„路由中间件完æˆï¼Œåšæ³•å¾ˆç®€å•ï¼Œåªéœ€å»ºç«‹ä¸€ä¸ªæ–° Negroni 实例,传人路由处ç†å™¨é‡Œå³å¯ã€‚ - -~~~ go -router := mux.NewRouter() -adminRoutes := mux.NewRouter() -// add admin routes here - -// Create a new negroni for the admin middleware -router.Handle("/admin", negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(adminRoutes), -)) -~~~ - -## 第三方中间件 - -以下的兼容 Negroni 的中间件列表,如果你也有兼容 Negroni 的中间件,å¯ä»¥æ交到这个列表æ¥äº¤æ¢é“¾æŽ¥ï¼Œæˆ‘们很ä¹æ„åšè¿™æ ·æœ‰ç›Šçš„事情。 - - -| 中间件 | 作者 | æè¿° | -| -------------|------------|-------------| -| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | REST API 接å£çš„å®‰å…¨è®¤è¯ | -| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | 优雅关闭 HTTP 的中间件 | -| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Middleware that implements a few quick security wins | -| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | Middleware checks for a JWT on the `Authorization` header on incoming requests and decodes it| -| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | HTTP 请求数æ®æ³¨å…¥åˆ° structs 实体| -| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | 基于 Logrus-based logger 日志 | -| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | 渲染 JSON, XML and HTML 中间件 | -| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime | -| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | å“åº”æµ GZIP 压缩 | -| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2 中间件 | -| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Session 会è¯ç®¡ç† | -| [permissions2](https://github.com/xyproto/permissions2) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, 用户和æƒé™ | -| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | å¿«é€Ÿç”Ÿæˆ TinySVG, HTML and CSS 中间件 | -| [cors](https://github.com/rs/cors) | [Olivier 
Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support | -| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea Franz](https://github.com/pilu) | ç»™æ¯ä¸ªè¯·æ±‚指定一个éšæœº X-Request-Id 头的中间件 | -| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) 基于 HMAC 鉴æƒè®¤è¯çš„中间件 | -| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | 检测 web 应用当å‰è¿è¡ŒçŠ¶æ€ä¿¡æ¯ (å“应时间等等。) | - -## 范例 -[Alexander Rødseth](https://github.com/xyproto) 创建的 [mooseware](https://github.com/xyproto/mooseware) 是一个写兼容 Negroni 中间件的处ç†å™¨éª¨æž¶çš„范例。 - -## å³æ—¶ç¼–译 -[gin](https://github.com/codegangsta/gin) å’Œ [fresh](https://github.com/pilu/fresh) 这两个应用是å³æ—¶ç¼–译的 Negroni 工具,推è用户开å‘的时候使用。 - -## Go & Negroni åˆå­¦è€…必读推è - -* [在中间件中使用上下文把消æ¯ä¼ é€’ç»™åŽç«¯å¤„ç†å™¨](http://elithrar.github.io/article/map-string-interface/) -* [了解中间件](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) - -## 关于 - -Negroni ç”± [Code Gangsta](http://codegangsta.io/) 主导设计开å‘å®Œæˆ \ No newline at end of file diff --git a/vendor/github.com/codegangsta/negroni/translations/README_zh_tw.md b/vendor/github.com/codegangsta/negroni/translations/README_zh_tw.md deleted file mode 100644 index eff715e1..00000000 --- a/vendor/github.com/codegangsta/negroni/translations/README_zh_tw.md +++ /dev/null @@ -1,177 +0,0 @@ -# Negroni(尼格é¾å°¼) [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) - -尼格é¾å°¼ç¬¦åˆGoçš„web 中介器特性. 精簡ã€éžä¾µå…¥å¼ã€é¼“勵使用 `net/http` Handler. - -如果你喜歡[Martini](http://github.com/go-martini/martini),但覺得這其中包太多神奇的功能,那麼尼格é¾å°¼æœƒæ˜¯ä½ çš„最佳é¸æ“‡ã€‚ - -## 入門 - -安è£å®ŒGo且設好[GOPATH](http://golang.org/doc/code.html#GOPATH),建立你的第一個`.go`檔。å¯ä»¥å‘½å為`server.go`。 - -~~~ go -package main - -import ( - "github.com/codegangsta/negroni" - "net/http" - "fmt" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -安è£å°¼æ ¼é¾å°¼å¥—件 (最低需求為**go 1.1**或更高版本): -~~~ -go get github.com/codegangsta/negroni -~~~ - -執行伺æœå™¨: -~~~ -go run server.go -~~~ - -ä½ ç¾åœ¨èµ·äº†ä¸€å€‹Goçš„net/http網é ä¼ºæœå™¨åœ¨`localhost:3000`. - -## 有å•é¡Œ? -如果你有å•é¡Œæˆ–新功能建議,[到這郵件群組討論](https://groups.google.com/forum/#!forum/negroni-users)。尼格é¾å°¼åœ¨GitHub上的issues專欄是專門用來回報bugè·Ÿpull requests。 - -## 尼格é¾å°¼æ˜¯å€‹frameworkå—Ž? -尼格é¾å°¼**ä¸æ˜¯**framework,是個設計用來直接使用net/httpçš„library。 - -## 路由? 
-尼格é¾å°¼æ˜¯BYOR (Bring your own Router,帶給你自訂路由)。在Go社群已經有大é‡å¯ç”¨çš„http路由器, 尼格é¾å°¼è©¦è‘—åšå¥½å®Œå…¨æ”¯æ´`net/http`,例如與[Gorilla Mux](http://github.com/gorilla/mux)æ•´åˆ: - -~~~ go -router := mux.NewRouter() -router.HandleFunc("/", HomeHandler) - -n := negroni.New(中介器1, 中介器2) -// Or use a 中介器 with the Use() function -n.Use(中介器3) -// router goes last -n.UseHandler(router) - -n.Run(":3000") -~~~ - -## `negroni.Classic()` -`negroni.Classic()` æ供一些好用的é è¨­ä¸­ä»‹å™¨: - -* `negroni.Recovery` - Panic 還原中介器 -* `negroni.Logging` - Request/Response 紀錄中介器 -* `negroni.Static` - 在"public"目錄下的éœæ…‹æª”案æœå‹™ - -尼格é¾å°¼çš„這些功能讓你開發變得很簡單。 - -## 處ç†å™¨(Handlers) -尼格é¾å°¼æ供一個雙å‘中介器的機制,介é¢ç‚º`negroni.Handler`: - -~~~ go -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} -~~~ - -如果中介器沒有寫入ResponseWriter,會呼å«é€šé“裡é¢çš„下個`http.HandlerFunc`讓給中介處ç†å™¨ã€‚å¯ä»¥è¢«ç”¨ä¾†åšè‰¯å¥½çš„應用: - -~~~ go -func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // 在這之å‰åšä¸€äº›äº‹ - next(rw, r) - // 在這之後åšä¸€äº›äº‹ -} -~~~ - -然後你å¯ä»¥é€éŽ`Use`函數å°æ‡‰åˆ°è™•ç†å™¨çš„通é“: - -~~~ go -n := negroni.New() -n.Use(negroni.HandlerFunc(MyMiddleware)) -~~~ - -你也å¯ä»¥æ‡‰åŽŸå§‹çš„舊`http.Handler`: - -~~~ go -n := negroni.New() - -mux := http.NewServeMux() -// map your routes - -n.UseHandler(mux) - -n.Run(":3000") -~~~ - -## `Run()` -尼格é¾å°¼æœ‰ä¸€å€‹å¾ˆå¥½ç”¨çš„函數`Run`,`Run`接收addr字串辨識[http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe)。 - -~~~ go -n := negroni.Classic() -// ... -log.Fatal(http.ListenAndServe(":8080", n)) -~~~ - -## 路由特有中介器 -如果你有一群路由需è¦åŸ·è¡Œç‰¹åˆ¥çš„中介器,你å¯ä»¥ç°¡å–®çš„建立一個新的尼格é¾å°¼å¯¦é«”當作路由處ç†å™¨ã€‚ - -~~~ go -router := mux.NewRouter() -adminRoutes := mux.NewRouter() -// add admin routes here - -// 為管ç†ä¸­ä»‹å™¨å»ºç«‹ä¸€å€‹æ–°çš„尼格é¾å°¼ -router.Handle("/admin", negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(adminRoutes), -)) -~~~ - -## 第三方中介器 - -以下為目å‰å°¼æ ¼é¾å°¼å…¼å®¹çš„中介器清單。如果你自己åšäº†ä¸€å€‹ä¸­ä»‹å™¨è«‹è‡ªç”±æ”¾å…¥ä½ çš„中介器互æ›é€£çµ: - -| 中介器 | 作者 | 說明 | -| -----------|--------|-------------| -| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | REST APIå…¥å£çš„安全èªè­‰ | -| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | 優雅的HTTP關機 | -| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | 檢疫安全功能的中介器 | -| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | 檢查JWT的中介器用來解æžå‚³å…¥è«‹æ±‚çš„`Authorization` header | -| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | å°‡HTTP請求轉到structs的資料ç¶å®š | -| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | 基於Logrus的紀錄器 | -| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | 渲染JSONã€XMLã€HTMLçš„æ¨£æ¿ | -| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | Go執行中的New Relic agent | -| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | GZIP回應壓縮 | -| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2中介器 | -| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Sessionç®¡ç† | -| [permissions2](https://github.com/xyproto/permissions2) | [Alexander 
Rødseth](https://github.com/xyproto) | Cookiesèˆ‡ä½¿ç”¨è€…æ¬Šé™ | -| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | 快速產生TinySVGã€HTMã€CSS | -| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) 支æ´(CORS) | -| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea Franz](https://github.com/pilu) | 在æ¯å€‹request指定一個隨機X-Request-Id header的中介器 | -| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) HMAC 授權中介器 | -| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | 儲存關於你的網é æ‡‰ç”¨è³‡è¨Š (回應時間之類) | - -## 範例 -[mooseware](https://github.com/xyproto/mooseware)是用來寫尼格é¾å°¼ä¸­ä»‹è™•ç†å™¨çš„骨架,由[Alexander Rødseth](https://github.com/xyproto)建立。 - -## å³æ™‚程å¼é‡è¼‰? -[gin](https://github.com/codegangsta/gin)å’Œ[fresh](https://github.com/pilu/fresh)兩個尼格é¾å°¼å³æ™‚é‡è¼‰çš„應用。 - -## Go & 尼格é¾å°¼åˆå­¸è€…必讀 - -* [使用Context將資訊從中介器é€åˆ°è™•ç†å™¨](http://elithrar.github.io/article/map-string-interface/) -* [ç†è§£ä¸­ä»‹å™¨](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) - -## 關於 - -尼格é¾å°¼æ­£æ˜¯[Code Gangsta](http://codegangsta.io/)的執著設計。 -譯者: Festum Qin (Festum@G.PL) diff --git a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md deleted file mode 100644 index 4a68ba40..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md +++ /dev/null @@ -1,13 +0,0 @@ -`jwt` command-line tool -======================= - -This is a simple tool to sign, verify and show JSON Web Tokens from -the command line. - -The following will create and sign a token, then verify it and output the original claims: - - echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - - -To simply display a token, use: - - echo $JWT | jwt -show - diff --git a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go deleted file mode 100644 index c0371147..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go +++ /dev/null @@ -1,245 +0,0 @@ -// A useful example app. You can use this to debug your tokens on the command line. -// This is also a great place to look at how you might use this library. -// -// Example usage: -// The following will create and sign a token, then verify it and output the original claims. 
-// echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "strings" - - jwt "github.com/dgrijalva/jwt-go" -) - -var ( - // Options - flagAlg = flag.String("alg", "", "signing algorithm identifier") - flagKey = flag.String("key", "", "path to key file or '-' to read from stdin") - flagCompact = flag.Bool("compact", false, "output compact JSON") - flagDebug = flag.Bool("debug", false, "print out all kinds of debug data") - - // Modes - exactly one of these is required - flagSign = flag.String("sign", "", "path to claims object to sign or '-' to read from stdin") - flagVerify = flag.String("verify", "", "path to JWT token to verify or '-' to read from stdin") - flagShow = flag.String("show", "", "path to JWT file or '-' to read from stdin") -) - -func main() { - // Usage message if you ask for -help or if you mess up inputs. - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - fmt.Fprintf(os.Stderr, " One of the following flags is required: sign, verify\n") - flag.PrintDefaults() - } - - // Parse command line options - flag.Parse() - - // Do the thing. If something goes wrong, print error to stderr - // and exit with a non-zero status code - if err := start(); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -// Figure out which thing to do and then do that -func start() error { - if *flagSign != "" { - return signToken() - } else if *flagVerify != "" { - return verifyToken() - } else if *flagShow != "" { - return showToken() - } else { - flag.Usage() - return fmt.Errorf("None of the required flags are present. What do you want me to do?") - } -} - -// Helper func: Read input from specified file or stdin -func loadData(p string) ([]byte, error) { - if p == "" { - return nil, fmt.Errorf("No path specified") - } - - var rdr io.Reader - if p == "-" { - rdr = os.Stdin - } else { - if f, err := os.Open(p); err == nil { - rdr = f - defer f.Close() - } else { - return nil, err - } - } - return ioutil.ReadAll(rdr) -} - -// Print a json object in accordance with the prophecy (or the command line options) -func printJSON(j interface{}) error { - var out []byte - var err error - - if *flagCompact == false { - out, err = json.MarshalIndent(j, "", " ") - } else { - out, err = json.Marshal(j) - } - - if err == nil { - fmt.Println(string(out)) - } - - return err -} - -// Verify a token and output the claims. This is a great example -// of how to verify and view a token. -func verifyToken() error { - // get the token - tokData, err := loadData(*flagVerify) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } - - // trim possible whitespace from token - tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) - if *flagDebug { - fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) - } - - // Parse the token. 
Load the key from command line option - token, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) { - data, err := loadData(*flagKey) - if err != nil { - return nil, err - } - if isEs() { - return jwt.ParseECPublicKeyFromPEM(data) - } - return data, nil - }) - - // Print some debug data - if *flagDebug && token != nil { - fmt.Fprintf(os.Stderr, "Header:\n%v\n", token.Header) - fmt.Fprintf(os.Stderr, "Claims:\n%v\n", token.Claims) - } - - // Print an error if we can't parse for some reason - if err != nil { - return fmt.Errorf("Couldn't parse token: %v", err) - } - - // Is token invalid? - if !token.Valid { - return fmt.Errorf("Token is invalid") - } - - // Print the token details - if err := printJSON(token.Claims); err != nil { - return fmt.Errorf("Failed to output claims: %v", err) - } - - return nil -} - -// Create, sign, and output a token. This is a great, simple example of -// how to use this library to create and sign a token. -func signToken() error { - // get the token data from command line arguments - tokData, err := loadData(*flagSign) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } else if *flagDebug { - fmt.Fprintf(os.Stderr, "Token: %v bytes", len(tokData)) - } - - // parse the JSON of the claims - var claims jwt.MapClaims - if err := json.Unmarshal(tokData, &claims); err != nil { - return fmt.Errorf("Couldn't parse claims JSON: %v", err) - } - - // get the key - var key interface{} - key, err = loadData(*flagKey) - if err != nil { - return fmt.Errorf("Couldn't read key: %v", err) - } - - // get the signing alg - alg := jwt.GetSigningMethod(*flagAlg) - if alg == nil { - return fmt.Errorf("Couldn't find signing method: %v", *flagAlg) - } - - // create a new token - token := jwt.NewWithClaims(alg, claims) - - if isEs() { - if k, ok := key.([]byte); !ok { - return fmt.Errorf("Couldn't convert key data to key") - } else { - key, err = jwt.ParseECPrivateKeyFromPEM(k) - if err != nil { - return err - } - } - } - - if out, err := token.SignedString(key); err == nil { - fmt.Println(out) - } else { - return fmt.Errorf("Error signing token: %v", err) - } - - return nil -} - -// showToken pretty-prints the token on the command line. 
-func showToken() error { - // get the token - tokData, err := loadData(*flagShow) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } - - // trim possible whitespace from token - tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) - if *flagDebug { - fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) - } - - token, err := jwt.Parse(string(tokData), nil) - if token == nil { - return fmt.Errorf("malformed token: %v", err) - } - - // Print the token details - fmt.Println("Header:") - if err := printJSON(token.Header); err != nil { - return fmt.Errorf("Failed to output header: %v", err) - } - - fmt.Println("Claims:") - if err := printJSON(token.Claims); err != nil { - return fmt.Errorf("Failed to output claims: %v", err) - } - - return nil -} - -func isEs() bool { - return strings.HasPrefix(*flagAlg, "ES") -} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go deleted file mode 100644 index 753047b1..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package jwt_test - -import ( - "crypto/ecdsa" - "io/ioutil" - "strings" - "testing" - - "github.com/dgrijalva/jwt-go" -) - -var ecdsaTestData = []struct { - name string - keys map[string]string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic ES256", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJmb28iOiJiYXIifQ.feG39E-bn8HXAKhzDZq7yEAPWYDhZlwTn3sePJnU9VrGMmwdXAIEyoOnrjreYlVM_Z4N13eK9-TmMTWyfKJtHQ", - "ES256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES384", - map[string]string{"private": "test/ec384-private.pem", "public": "test/ec384-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzM4NCJ9.eyJmb28iOiJiYXIifQ.ngAfKMbJUh0WWubSIYe5GMsA-aHNKwFbJk_wq3lq23aPp8H2anb1rRILIzVR0gUf4a8WzDtrzmiikuPWyCS6CN4-PwdgTk-5nehC7JXqlaBZU05p3toM3nWCwm_LXcld", - "ES384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES512", - map[string]string{"private": "test/ec512-private.pem", "public": "test/ec512-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJmb28iOiJiYXIifQ.AAU0TvGQOcdg2OvrwY73NHKgfk26UDekh9Prz-L_iWuTBIBqOFCWwwLsRiHB1JOddfKAls5do1W0jR_F30JpVd-6AJeTjGKA4C1A1H6gIKwRY0o_tFDIydZCl_lMBMeG5VNFAjO86-WCSKwc3hqaGkq1MugPRq_qrF9AVbuEB4JPLyL5", - "ES512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic ES256 invalid: foo => bar", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.MEQCIHoSJnmGlPaVQDqacx_2XlXEhhqtWceVopjomc2PJLtdAiAUTeGPoNYxZw0z8mgOnnIcjoxRuNDVZvybRZF3wR1l8W", - "ES256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestECDSAVerify(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - - key, _ := ioutil.ReadFile(data.keys["public"]) - - var ecdsaKey *ecdsa.PublicKey - if ecdsaKey, err = jwt.ParseECPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA public key: %v", err) - } - - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err = method.Verify(strings.Join(parts[0:2], "."), parts[2], ecdsaKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func 
TestECDSASign(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - key, _ := ioutil.ReadFile(data.keys["private"]) - - var ecdsaKey *ecdsa.PrivateKey - if ecdsaKey, err = jwt.ParseECPrivateKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA private key: %v", err) - } - - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), ecdsaKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Identical signatures\nbefore:\n%v\nafter:\n%v", data.name, parts[2], sig) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/example_test.go b/vendor/github.com/dgrijalva/jwt-go/example_test.go deleted file mode 100644 index ae8b788a..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/example_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package jwt_test - -import ( - "fmt" - "github.com/dgrijalva/jwt-go" - "time" -) - -// Example (atypical) using the StandardClaims type by itself to parse a token. -// The StandardClaims type is designed to be embedded into your custom types -// to provide standard validation features. You can use it alone, but there's -// no way to retrieve other fields after parsing. -// See the CustomClaimsType example for intended usage. -func ExampleNewWithClaims_standardClaims() { - mySigningKey := []byte("AllYourBase") - - // Create the Claims - claims := &jwt.StandardClaims{ - ExpiresAt: 15000, - Issuer: "test", - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - ss, err := token.SignedString(mySigningKey) - fmt.Printf("%v %v", ss, err) - //Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.QsODzZu3lUZMVdhbO76u3Jv02iYCvEHcYVUI1kOWEU0 -} - -// Example creating a token using a custom claims type. The StandardClaim is embedded -// in the custom type to allow for easy encoding, parsing and validation of standard claims. -func ExampleNewWithClaims_customClaimsType() { - mySigningKey := []byte("AllYourBase") - - type MyCustomClaims struct { - Foo string `json:"foo"` - jwt.StandardClaims - } - - // Create the Claims - claims := MyCustomClaims{ - "bar", - jwt.StandardClaims{ - ExpiresAt: 15000, - Issuer: "test", - }, - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - ss, err := token.SignedString(mySigningKey) - fmt.Printf("%v %v", ss, err) - //Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c -} - -// Example creating a token using a custom claims type. The StandardClaim is embedded -// in the custom type to allow for easy encoding, parsing and validation of standard claims. -func ExampleParseWithClaims_customClaimsType() { - tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c" - - type MyCustomClaims struct { - Foo string `json:"foo"` - jwt.StandardClaims - } - - // sample token is expired. 
override time so it parses as valid - at(time.Unix(0, 0), func() { - token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, func(token *jwt.Token) (interface{}, error) { - return []byte("AllYourBase"), nil - }) - - if claims, ok := token.Claims.(*MyCustomClaims); ok && token.Valid { - fmt.Printf("%v %v", claims.Foo, claims.StandardClaims.ExpiresAt) - } else { - fmt.Println(err) - } - }) - - // Output: bar 15000 -} - -// Override time value for tests. Restore default value after. -func at(t time.Time, f func()) { - jwt.TimeFunc = func() time.Time { - return t - } - f() - jwt.TimeFunc = time.Now -} - -// An example of parsing the error types using bitfield checks -func ExampleParse_errorChecking() { - // Token from another example. This token is expired - var tokenString = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c" - - token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { - return []byte("AllYourBase"), nil - }) - - if token.Valid { - fmt.Println("You look nice today") - } else if ve, ok := err.(*jwt.ValidationError); ok { - if ve.Errors&jwt.ValidationErrorMalformed != 0 { - fmt.Println("That's not even a token") - } else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 { - // Token is either expired or not active yet - fmt.Println("Timing is everything") - } else { - fmt.Println("Couldn't handle this token:", err) - } - } else { - fmt.Println("Couldn't handle this token:", err) - } - - // Output: Timing is everything -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go b/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go deleted file mode 100644 index 8fb56782..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac_example_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package jwt_test - -import ( - "fmt" - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "time" -) - -// For HMAC signing method, the key can be any []byte. It is recommended to generate -// a key using crypto/rand or something equivalent. You need the same key for signing -// and validating. -var hmacSampleSecret []byte - -func init() { - // Load sample key data - if keyData, e := ioutil.ReadFile("test/hmacTestKey"); e == nil { - hmacSampleSecret = keyData - } else { - panic(e) - } -} - -// Example creating, signing, and encoding a JWT token using the HMAC signing method -func ExampleNew_hmac() { - // Create a new token object, specifying signing method and the claims - // you would like it to contain. - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "foo": "bar", - "nbf": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(), - }) - - // Sign and get the complete encoded token as a string using the secret - tokenString, err := token.SignedString(hmacSampleSecret) - - fmt.Println(tokenString, err) - // Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU -} - -// Example parsing and validating a token using the HMAC signing method -func ExampleParse_hmac() { - // sample token string taken from the New example - tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU" - - // Parse takes the token string and a function for looking up the key. The latter is especially - // useful if you use multiple keys for your application. 
The standard is to use 'kid' in the - // head of the token to identify which key to use, but the parsed token (head and claims) is provided - // to the callback, providing flexibility. - token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return hmacSampleSecret, nil - }) - - if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid { - fmt.Println(claims["foo"], claims["nbf"]) - } else { - fmt.Println(err) - } - - // Output: bar 1.4444784e+09 -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac_test.go b/vendor/github.com/dgrijalva/jwt-go/hmac_test.go deleted file mode 100644 index c7e114f4..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var hmacTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "web sample", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS384", - "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.KWZEuOD5lbBxZ34g7F-SlVLAQ_r5KApWNWlZIIMyQVz5Zs58a7XdNzj5_0EcNoOy", - "HS384", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS512", - "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.CN7YijRX6Aw1n2jyI2Id1w90ja-DEMYiWixhYCyHnrZ1VfJRaFQz1bEbjjA5Fn4CLYaUG432dEYmSbS4Saokmw", - "HS512", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "web sample: invalid", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXo", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - false, - }, -} - -// Sample data from http://tools.ietf.org/html/draft-jones-json-web-signature-04#appendix-A.1 -var hmacTestKey, _ = ioutil.ReadFile("test/hmacTestKey") - -func TestHMACVerify(t *testing.T) { - for _, data := range hmacTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], hmacTestKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestHMACSign(t *testing.T) { - for _, data := range hmacTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), hmacTestKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", 
data.name, sig, parts[2]) - } - } - } -} - -func BenchmarkHS256Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS256, hmacTestKey) -} - -func BenchmarkHS384Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS384, hmacTestKey) -} - -func BenchmarkHS512Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS512, hmacTestKey) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/http_example_test.go b/vendor/github.com/dgrijalva/jwt-go/http_example_test.go deleted file mode 100644 index 82e9c50a..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/http_example_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package jwt_test - -// Example HTTP auth using asymmetric crypto/RSA keys -// This is based on a (now outdated) example at https://gist.github.com/cryptix/45c33ecf0ae54828e63b - -import ( - "bytes" - "crypto/rsa" - "fmt" - "github.com/dgrijalva/jwt-go" - "github.com/dgrijalva/jwt-go/request" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// location of the files used for signing and verification -const ( - privKeyPath = "test/sample_key" // openssl genrsa -out app.rsa keysize - pubKeyPath = "test/sample_key.pub" // openssl rsa -in app.rsa -pubout > app.rsa.pub -) - -var ( - verifyKey *rsa.PublicKey - signKey *rsa.PrivateKey - serverPort int - // storing sample username/password pairs - // don't do this on a real server - users = map[string]string{ - "test": "known", - } -) - -// read the key files before starting http handlers -func init() { - signBytes, err := ioutil.ReadFile(privKeyPath) - fatal(err) - - signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - fatal(err) - - verifyBytes, err := ioutil.ReadFile(pubKeyPath) - fatal(err) - - verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) - fatal(err) - - http.HandleFunc("/authenticate", authHandler) - http.HandleFunc("/restricted", restrictedHandler) - - // Setup listener - listener, err := net.ListenTCP("tcp", &net.TCPAddr{}) - serverPort = listener.Addr().(*net.TCPAddr).Port - - log.Println("Listening...") - go func() { - fatal(http.Serve(listener, nil)) - }() -} - -var start func() - -func fatal(err error) { - if err != nil { - log.Fatal(err) - } -} - -// Define some custom types were going to use within our tokens -type CustomerInfo struct { - Name string - Kind string -} - -type CustomClaimsExample struct { - *jwt.StandardClaims - TokenType string - CustomerInfo -} - -func Example_getTokenViaHTTP() { - // See func authHandler for an example auth handler that produces a token - res, err := http.PostForm(fmt.Sprintf("http://localhost:%v/authenticate", serverPort), url.Values{ - "user": {"test"}, - "pass": {"known"}, - }) - if err != nil { - fatal(err) - } - - if res.StatusCode != 200 { - fmt.Println("Unexpected status code", res.StatusCode) - } - - // Read the token out of the response body - buf := new(bytes.Buffer) - io.Copy(buf, res.Body) - res.Body.Close() - tokenString := strings.TrimSpace(buf.String()) - - // Parse the token - token, err := jwt.ParseWithClaims(tokenString, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) { - // since we only use the one private key to sign the tokens, - // we also only use its public counter part to verify - return verifyKey, nil - }) - fatal(err) - - claims := token.Claims.(*CustomClaimsExample) - fmt.Println(claims.CustomerInfo.Name) - - //Output: test -} - -func Example_useTokenViaHTTP() { - - // Make a sample token - // In a real world situation, this token will have been acquired from - // some 
other API call (see Example_getTokenViaHTTP) - token, err := createToken("foo") - fatal(err) - - // Make request. See func restrictedHandler for example request processor - req, err := http.NewRequest("GET", fmt.Sprintf("http://localhost:%v/restricted", serverPort), nil) - fatal(err) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %v", token)) - res, err := http.DefaultClient.Do(req) - fatal(err) - - // Read the response body - buf := new(bytes.Buffer) - io.Copy(buf, res.Body) - res.Body.Close() - fmt.Println(buf.String()) - - // Output: Welcome, foo -} - -func createToken(user string) (string, error) { - // create a signer for rsa 256 - t := jwt.New(jwt.GetSigningMethod("RS256")) - - // set our claims - t.Claims = &CustomClaimsExample{ - &jwt.StandardClaims{ - // set the expire time - // see http://tools.ietf.org/html/draft-ietf-oauth-json-web-token-20#section-4.1.4 - ExpiresAt: time.Now().Add(time.Minute * 1).Unix(), - }, - "level1", - CustomerInfo{user, "human"}, - } - - // Creat token string - return t.SignedString(signKey) -} - -// reads the form values, checks them and creates the token -func authHandler(w http.ResponseWriter, r *http.Request) { - // make sure its post - if r.Method != "POST" { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintln(w, "No POST", r.Method) - return - } - - user := r.FormValue("user") - pass := r.FormValue("pass") - - log.Printf("Authenticate: user[%s] pass[%s]\n", user, pass) - - // check values - if user != "test" || pass != "known" { - w.WriteHeader(http.StatusForbidden) - fmt.Fprintln(w, "Wrong info") - return - } - - tokenString, err := createToken(user) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintln(w, "Sorry, error while Signing Token!") - log.Printf("Token Signing error: %v\n", err) - return - } - - w.Header().Set("Content-Type", "application/jwt") - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, tokenString) -} - -// only accessible with a valid token -func restrictedHandler(w http.ResponseWriter, r *http.Request) { - // Get token from request - token, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) { - // since we only use the one private key to sign the tokens, - // we also only use its public counter part to verify - return verifyKey, nil - }) - - // If the token is missing or invalid, return error - if err != nil { - w.WriteHeader(http.StatusUnauthorized) - fmt.Fprintln(w, "Invalid token:", err) - return - } - - // Token is valid - fmt.Fprintln(w, "Welcome,", token.Claims.(*CustomClaimsExample).Name) - return -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none_test.go b/vendor/github.com/dgrijalva/jwt-go/none_test.go deleted file mode 100644 index 29a69efe..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/none_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "strings" - "testing" -) - -var noneTestData = []struct { - name string - tokenString string - alg string - key interface{} - claims map[string]interface{} - valid bool -}{ - { - "Basic", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", - "none", - jwt.UnsafeAllowNoneSignatureType, - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic - no key", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", - "none", - nil, - map[string]interface{}{"foo": "bar"}, - false, - }, - { - "Signed", - 
"eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", - "none", - jwt.UnsafeAllowNoneSignatureType, - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestNoneVerify(t *testing.T) { - for _, data := range noneTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], data.key) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestNoneSign(t *testing.T) { - for _, data := range noneTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), data.key) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser_test.go b/vendor/github.com/dgrijalva/jwt-go/parser_test.go deleted file mode 100644 index 0c86801b..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package jwt_test - -import ( - "crypto/rsa" - "encoding/json" - "fmt" - "reflect" - "testing" - "time" - - "github.com/dgrijalva/jwt-go" - "github.com/dgrijalva/jwt-go/test" -) - -var keyFuncError error = fmt.Errorf("error loading key") - -var ( - jwtTestDefaultKey *rsa.PublicKey - defaultKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return jwtTestDefaultKey, nil } - emptyKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, nil } - errorKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, keyFuncError } - nilKeyFunc jwt.Keyfunc = nil -) - -func init() { - jwtTestDefaultKey = test.LoadRSAPublicKeyFromDisk("test/sample_key.pub") -} - -var jwtTestData = []struct { - name string - tokenString string - keyfunc jwt.Keyfunc - claims jwt.Claims - valid bool - errors uint32 - parser *jwt.Parser -}{ - { - "basic", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - true, - 0, - nil, - }, - { - "basic expired", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "exp": float64(time.Now().Unix() - 100)}, - false, - jwt.ValidationErrorExpired, - nil, - }, - { - "basic nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100)}, - false, - jwt.ValidationErrorNotValidYet, - nil, - }, - { - "expired and nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100), "exp": float64(time.Now().Unix() 
- 100)}, - false, - jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, - nil, - }, - { - "basic invalid", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic nokeyfunc", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - nilKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "basic nokey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - emptyKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic errorkey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - errorKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "invalid signing method", - "", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - &jwt.Parser{ValidMethods: []string{"HS256"}}, - }, - { - "valid signing method", - "", - defaultKeyFunc, - jwt.MapClaims{"foo": "bar"}, - true, - 0, - &jwt.Parser{ValidMethods: []string{"RS256", "HS256"}}, - }, - { - "JSON Number", - "", - defaultKeyFunc, - jwt.MapClaims{"foo": json.Number("123.4")}, - true, - 0, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "Standard Claims", - "", - defaultKeyFunc, - &jwt.StandardClaims{ - ExpiresAt: time.Now().Add(time.Second * 10).Unix(), - }, - true, - 0, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - basic expired", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, - false, - jwt.ValidationErrorExpired, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - basic nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))}, - false, - jwt.ValidationErrorNotValidYet, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - expired and nbf", - "", // autogen - defaultKeyFunc, - jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", 
time.Now().Unix()+100)), "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, - false, - jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, - &jwt.Parser{UseJSONNumber: true}, - }, -} - -func TestParser_Parse(t *testing.T) { - privateKey := test.LoadRSAPrivateKeyFromDisk("test/sample_key") - - // Iterate over test data set and run tests - for _, data := range jwtTestData { - // If the token string is blank, use helper function to generate string - if data.tokenString == "" { - data.tokenString = test.MakeSampleToken(data.claims, privateKey) - } - - // Parse the token - var token *jwt.Token - var err error - var parser = data.parser - if parser == nil { - parser = new(jwt.Parser) - } - // Figure out correct claims type - switch data.claims.(type) { - case jwt.MapClaims: - token, err = parser.ParseWithClaims(data.tokenString, jwt.MapClaims{}, data.keyfunc) - case *jwt.StandardClaims: - token, err = parser.ParseWithClaims(data.tokenString, &jwt.StandardClaims{}, data.keyfunc) - } - - // Verify result matches expectation - if !reflect.DeepEqual(data.claims, token.Claims) { - t.Errorf("[%v] Claims mismatch. Expecting: %v Got: %v", data.name, data.claims, token.Claims) - } - - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying token: %T:%v", data.name, err, err) - } - - if !data.valid && err == nil { - t.Errorf("[%v] Invalid token passed validation", data.name) - } - - if (err == nil && !token.Valid) || (err != nil && token.Valid) { - t.Errorf("[%v] Inconsistent behavior between returned error and token.Valid") - } - - if data.errors != 0 { - if err == nil { - t.Errorf("[%v] Expecting error. Didn't get one.", data.name) - } else { - - ve := err.(*jwt.ValidationError) - // compare the bitfield part of the error - if e := ve.Errors; e != data.errors { - t.Errorf("[%v] Errors don't match expectation. %v != %v", data.name, e, data.errors) - } - - if err.Error() == keyFuncError.Error() && ve.Inner != keyFuncError { - t.Errorf("[%v] Inner error does not match expectation. %v != %v", data.name, ve.Inner, keyFuncError) - } - } - } - if data.valid && token.Signature == "" { - t.Errorf("[%v] Signature is left unpopulated after parsing", data.name) - } - } -} - -// Helper method for benchmarking various methods -func benchmarkSigning(b *testing.B, method jwt.SigningMethod, key interface{}) { - t := jwt.New(method) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if _, err := t.SignedString(key); err != nil { - b.Fatal(err) - } - } - }) - -} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/doc.go b/vendor/github.com/dgrijalva/jwt-go/request/doc.go deleted file mode 100644 index c01069c9..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/request/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Utility package for extracting JWT tokens from -// HTTP requests. -// -// The main function is ParseFromRequest and it's WithClaims variant. -// See examples for how to use the various Extractor implementations -// or roll your own. -package request diff --git a/vendor/github.com/dgrijalva/jwt-go/request/extractor.go b/vendor/github.com/dgrijalva/jwt-go/request/extractor.go deleted file mode 100644 index 14414fe2..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/request/extractor.go +++ /dev/null @@ -1,81 +0,0 @@ -package request - -import ( - "errors" - "net/http" -) - -// Errors -var ( - ErrNoTokenInRequest = errors.New("no token present in request") -) - -// Interface for extracting a token from an HTTP request. 
-// The ExtractToken method should return a token string or an error. -// If no token is present, you must return ErrNoTokenInRequest. -type Extractor interface { - ExtractToken(*http.Request) (string, error) -} - -// Extractor for finding a token in a header. Looks at each specified -// header in order until there's a match -type HeaderExtractor []string - -func (e HeaderExtractor) ExtractToken(req *http.Request) (string, error) { - // loop over header names and return the first one that contains data - for _, header := range e { - if ah := req.Header.Get(header); ah != "" { - return ah, nil - } - } - return "", ErrNoTokenInRequest -} - -// Extract token from request arguments. This includes a POSTed form or -// GET URL arguments. Argument names are tried in order until there's a match. -// This extractor calls `ParseMultipartForm` on the request -type ArgumentExtractor []string - -func (e ArgumentExtractor) ExtractToken(req *http.Request) (string, error) { - // Make sure form is parsed - req.ParseMultipartForm(10e6) - - // loop over arg names and return the first one that contains data - for _, arg := range e { - if ah := req.Form.Get(arg); ah != "" { - return ah, nil - } - } - - return "", ErrNoTokenInRequest -} - -// Tries Extractors in order until one returns a token string or an error occurs -type MultiExtractor []Extractor - -func (e MultiExtractor) ExtractToken(req *http.Request) (string, error) { - // loop over header names and return the first one that contains data - for _, extractor := range e { - if tok, err := extractor.ExtractToken(req); tok != "" { - return tok, nil - } else if err != ErrNoTokenInRequest { - return "", err - } - } - return "", ErrNoTokenInRequest -} - -// Wrap an Extractor in this to post-process the value before it's handed off. 
-// See AuthorizationHeaderExtractor for an example -type PostExtractionFilter struct { - Extractor - Filter func(string) (string, error) -} - -func (e *PostExtractionFilter) ExtractToken(req *http.Request) (string, error) { - if tok, err := e.Extractor.ExtractToken(req); tok != "" { - return e.Filter(tok) - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/extractor_example_test.go b/vendor/github.com/dgrijalva/jwt-go/request/extractor_example_test.go deleted file mode 100644 index a994ffe5..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/request/extractor_example_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package request - -import ( - "fmt" - "net/url" -) - -const ( - exampleTokenA = "A" -) - -func ExampleHeaderExtractor() { - req := makeExampleRequest("GET", "/", map[string]string{"Token": exampleTokenA}, nil) - tokenString, err := HeaderExtractor{"Token"}.ExtractToken(req) - if err == nil { - fmt.Println(tokenString) - } else { - fmt.Println(err) - } - //Output: A -} - -func ExampleArgumentExtractor() { - req := makeExampleRequest("GET", "/", nil, url.Values{"token": {extractorTestTokenA}}) - tokenString, err := ArgumentExtractor{"token"}.ExtractToken(req) - if err == nil { - fmt.Println(tokenString) - } else { - fmt.Println(err) - } - //Output: A -} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/extractor_test.go b/vendor/github.com/dgrijalva/jwt-go/request/extractor_test.go deleted file mode 100644 index e3bbb0a3..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/request/extractor_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package request - -import ( - "fmt" - "net/http" - "net/url" - "testing" -) - -var extractorTestTokenA = "A" -var extractorTestTokenB = "B" - -var extractorTestData = []struct { - name string - extractor Extractor - headers map[string]string - query url.Values - token string - err error -}{ - { - name: "simple header", - extractor: HeaderExtractor{"Foo"}, - headers: map[string]string{"Foo": extractorTestTokenA}, - query: nil, - token: extractorTestTokenA, - err: nil, - }, - { - name: "simple argument", - extractor: ArgumentExtractor{"token"}, - headers: map[string]string{}, - query: url.Values{"token": {extractorTestTokenA}}, - token: extractorTestTokenA, - err: nil, - }, - { - name: "multiple extractors", - extractor: MultiExtractor{ - HeaderExtractor{"Foo"}, - ArgumentExtractor{"token"}, - }, - headers: map[string]string{"Foo": extractorTestTokenA}, - query: url.Values{"token": {extractorTestTokenB}}, - token: extractorTestTokenA, - err: nil, - }, - { - name: "simple miss", - extractor: HeaderExtractor{"This-Header-Is-Not-Set"}, - headers: map[string]string{"Foo": extractorTestTokenA}, - query: nil, - token: "", - err: ErrNoTokenInRequest, - }, - { - name: "filter", - extractor: AuthorizationHeaderExtractor, - headers: map[string]string{"Authorization": "Bearer " + extractorTestTokenA}, - query: nil, - token: extractorTestTokenA, - err: nil, - }, -} - -func TestExtractor(t *testing.T) { - // Bearer token request - for _, data := range extractorTestData { - // Make request from test struct - r := makeExampleRequest("GET", "/", data.headers, data.query) - - // Test extractor - token, err := data.extractor.ExtractToken(r) - if token != data.token { - t.Errorf("[%v] Expected token '%v'. Got '%v'", data.name, data.token, token) - continue - } - if err != data.err { - t.Errorf("[%v] Expected error '%v'. 
Got '%v'", data.name, data.err, err) - continue - } - } -} - -func makeExampleRequest(method, path string, headers map[string]string, urlArgs url.Values) *http.Request { - r, _ := http.NewRequest(method, fmt.Sprintf("%v?%v", path, urlArgs.Encode()), nil) - for k, v := range headers { - r.Header.Set(k, v) - } - return r -} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/oauth2.go b/vendor/github.com/dgrijalva/jwt-go/request/oauth2.go deleted file mode 100644 index 5948694a..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/request/oauth2.go +++ /dev/null @@ -1,28 +0,0 @@ -package request - -import ( - "strings" -) - -// Strips 'Bearer ' prefix from bearer token string -func stripBearerPrefixFromTokenString(tok string) (string, error) { - // Should be a bearer token - if len(tok) > 6 && strings.ToUpper(tok[0:7]) == "BEARER " { - return tok[7:], nil - } - return tok, nil -} - -// Extract bearer token from Authorization header -// Uses PostExtractionFilter to strip "Bearer " prefix from header -var AuthorizationHeaderExtractor = &PostExtractionFilter{ - HeaderExtractor{"Authorization"}, - stripBearerPrefixFromTokenString, -} - -// Extractor for OAuth2 access tokens. Looks in 'Authorization' -// header then 'access_token' argument for a token. -var OAuth2Extractor = &MultiExtractor{ - AuthorizationHeaderExtractor, - ArgumentExtractor{"access_token"}, -} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/request.go b/vendor/github.com/dgrijalva/jwt-go/request/request.go deleted file mode 100644 index 1807b396..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/request/request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "github.com/dgrijalva/jwt-go" - "net/http" -) - -// Extract and parse a JWT token from an HTTP request. -// This behaves the same as Parse, but accepts a request and an extractor -// instead of a token string. The Extractor interface allows you to define -// the logic for extracting a token. Several useful implementations are provided. 
-func ParseFromRequest(req *http.Request, extractor Extractor, keyFunc jwt.Keyfunc) (token *jwt.Token, err error) { - return ParseFromRequestWithClaims(req, extractor, jwt.MapClaims{}, keyFunc) -} - -// ParseFromRequest but with custom Claims type -func ParseFromRequestWithClaims(req *http.Request, extractor Extractor, claims jwt.Claims, keyFunc jwt.Keyfunc) (token *jwt.Token, err error) { - // Extract token from request - if tokStr, err := extractor.ExtractToken(req); err == nil { - return jwt.ParseWithClaims(tokStr, claims, keyFunc) - } else { - return nil, err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/request/request_test.go b/vendor/github.com/dgrijalva/jwt-go/request/request_test.go deleted file mode 100644 index b4365cd8..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/request/request_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package request - -import ( - "fmt" - "github.com/dgrijalva/jwt-go" - "github.com/dgrijalva/jwt-go/test" - "net/http" - "net/url" - "reflect" - "strings" - "testing" -) - -var requestTestData = []struct { - name string - claims jwt.MapClaims - extractor Extractor - headers map[string]string - query url.Values - valid bool -}{ - { - "authorization bearer token", - jwt.MapClaims{"foo": "bar"}, - AuthorizationHeaderExtractor, - map[string]string{"Authorization": "Bearer %v"}, - url.Values{}, - true, - }, - { - "oauth bearer token - header", - jwt.MapClaims{"foo": "bar"}, - OAuth2Extractor, - map[string]string{"Authorization": "Bearer %v"}, - url.Values{}, - true, - }, - { - "oauth bearer token - url", - jwt.MapClaims{"foo": "bar"}, - OAuth2Extractor, - map[string]string{}, - url.Values{"access_token": {"%v"}}, - true, - }, - { - "url token", - jwt.MapClaims{"foo": "bar"}, - ArgumentExtractor{"token"}, - map[string]string{}, - url.Values{"token": {"%v"}}, - true, - }, -} - -func TestParseRequest(t *testing.T) { - // load keys from disk - privateKey := test.LoadRSAPrivateKeyFromDisk("../test/sample_key") - publicKey := test.LoadRSAPublicKeyFromDisk("../test/sample_key.pub") - keyfunc := func(*jwt.Token) (interface{}, error) { - return publicKey, nil - } - - // Bearer token request - for _, data := range requestTestData { - // Make token from claims - tokenString := test.MakeSampleToken(data.claims, privateKey) - - // Make query string - for k, vv := range data.query { - for i, v := range vv { - if strings.Contains(v, "%v") { - data.query[k][i] = fmt.Sprintf(v, tokenString) - } - } - } - - // Make request from test struct - r, _ := http.NewRequest("GET", fmt.Sprintf("/?%v", data.query.Encode()), nil) - for k, v := range data.headers { - if strings.Contains(v, "%v") { - r.Header.Set(k, fmt.Sprintf(v, tokenString)) - } else { - r.Header.Set(k, tokenString) - } - } - token, err := ParseFromRequestWithClaims(r, data.extractor, jwt.MapClaims{}, keyfunc) - - if token == nil { - t.Errorf("[%v] Token was not found: %v", data.name, err) - continue - } - if !reflect.DeepEqual(data.claims, token.Claims) { - t.Errorf("[%v] Claims mismatch. 
Expecting: %v Got: %v", data.name, data.claims, token.Claims) - } - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying token: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid token passed validation", data.name) - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go deleted file mode 100644 index 9045aaf3..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build go1.4 - -package jwt_test - -import ( - "crypto/rsa" - "io/ioutil" - "strings" - "testing" - - "github.com/dgrijalva/jwt-go" -) - -var rsaPSSTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic PS256", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9w", - "PS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS384", - "eyJhbGciOiJQUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.w7-qqgj97gK4fJsq_DCqdYQiylJjzWONvD0qWWWhqEOFk2P1eDULPnqHRnjgTXoO4HAw4YIWCsZPet7nR3Xxq4ZhMqvKW8b7KlfRTb9cH8zqFvzMmybQ4jv2hKc3bXYqVow3AoR7hN_CWXI3Dv6Kd2X5xhtxRHI6IL39oTVDUQ74LACe-9t4c3QRPuj6Pq1H4FAT2E2kW_0KOc6EQhCLWEhm2Z2__OZskDC8AiPpP8Kv4k2vB7l0IKQu8Pr4RcNBlqJdq8dA5D3hk5TLxP8V5nG1Ib80MOMMqoS3FQvSLyolFX-R_jZ3-zfq6Ebsqr0yEb0AH2CfsECF7935Pa0FKQ", - "PS384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS512", - "eyJhbGciOiJQUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.GX1HWGzFaJevuSLavqqFYaW8_TpvcjQ8KfC5fXiSDzSiT9UD9nB_ikSmDNyDILNdtjZLSvVKfXxZJqCfefxAtiozEDDdJthZ-F0uO4SPFHlGiXszvKeodh7BuTWRI2wL9-ZO4mFa8nq3GMeQAfo9cx11i7nfN8n2YNQ9SHGovG7_T_AvaMZB_jT6jkDHpwGR9mz7x1sycckEo6teLdHRnH_ZdlHlxqknmyTu8Odr5Xh0sJFOL8BepWbbvIIn-P161rRHHiDWFv6nhlHwZnVzjx7HQrWSGb6-s2cdLie9QL_8XaMcUpjLkfOMKkDOfHo6AvpL7Jbwi83Z2ZTHjJWB-A", - "PS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic PS256 invalid: foo => bar", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9W", - "PS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAPSSVerify(t *testing.T) { - var err error - - key, _ := ioutil.ReadFile("test/sample_key.pub") - var rsaPSSKey *rsa.PublicKey - if rsaPSSKey, err = jwt.ParseRSAPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse RSA public key: %v", err) - } - - for _, data := range rsaPSSTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], rsaPSSKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestRSAPSSSign(t *testing.T) { - var err error - - key, _ := 
ioutil.ReadFile("test/sample_key") - var rsaPSSKey *rsa.PrivateKey - if rsaPSSKey, err = jwt.ParseRSAPrivateKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse RSA private key: %v", err) - } - - for _, data := range rsaPSSTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), rsaPSSKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Signatures shouldn't match\nnew:\n%v\noriginal:\n%v", data.name, sig, parts[2]) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_test.go deleted file mode 100644 index 2e0f7853..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var rsaTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic RS256", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS384", - "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", - "RS384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS512", - "eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.zBlLlmRrUxx4SJPUbV37Q1joRcI9EW13grnKduK3wtYKmDXbgDpF1cZ6B-2Jsm5RB8REmMiLpGms-EjXhgnyh2TSHE-9W2gA_jvshegLWtwRVDX40ODSkTb7OVuaWgiy9y7llvcknFBTIg-FnVPVpXMmeV_pvwQyhaz1SSwSPrDyxEmksz1hq7YONXhXPpGaNbMMeDTNP_1oj8DZaqTIL9TwV8_1wb2Odt_Fy58Ke2RVFijsOLdnyEAjt2n9Mxihu9i3PhNBkkxa2GbnXBfq3kzvZ_xxGGopLdHhJjcGWXO-NiwI9_tiu14NRv4L2xC0ItD9Yz68v2ZIZEp_DuzwRQ", - "RS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic invalid: foo => bar", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAVerify(t *testing.T) { - keyData, _ := ioutil.ReadFile("test/sample_key.pub") - key, _ := jwt.ParseRSAPublicKeyFromPEM(keyData) - - for _, data := range rsaTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], key) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == 
nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestRSASign(t *testing.T) { - keyData, _ := ioutil.ReadFile("test/sample_key") - key, _ := jwt.ParseRSAPrivateKeyFromPEM(keyData) - - for _, data := range rsaTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), key) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} - -func TestRSAVerifyWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key.pub") - parsedKey, err := jwt.ParseRSAPublicKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - err = jwt.SigningMethodRS256.Verify(strings.Join(parts[0:2], "."), parts[2], parsedKey) - if err != nil { - t.Errorf("[%v] Error while verifying key: %v", testData.name, err) - } -} - -func TestRSAWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - sig, err := jwt.SigningMethodRS256.Sign(strings.Join(parts[0:2], "."), parsedKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", testData.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", testData.name, sig, parts[2]) - } -} - -func TestRSAKeyParsing(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - pubKey, _ := ioutil.ReadFile("test/sample_key.pub") - badKey := []byte("All your base are belong to key") - - // Test parsePrivateKey - if _, e := jwt.ParseRSAPrivateKeyFromPEM(key); e != nil { - t.Errorf("Failed to parse valid private key: %v", e) - } - - if k, e := jwt.ParseRSAPrivateKeyFromPEM(pubKey); e == nil { - t.Errorf("Parsed public key as valid private key: %v", k) - } - - if k, e := jwt.ParseRSAPrivateKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - - // Test parsePublicKey - if _, e := jwt.ParseRSAPublicKeyFromPEM(pubKey); e != nil { - t.Errorf("Failed to parse valid public key: %v", e) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(key); e == nil { - t.Errorf("Parsed private key as valid public key: %v", k) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - -} - -func BenchmarkRS256Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS256, parsedKey) -} - -func BenchmarkRS384Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS384, parsedKey) -} - -func BenchmarkRS512Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS512, parsedKey) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem 
deleted file mode 100644 index a6882b3e..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIAh5qA3rmqQQuu0vbKV/+zouz/y/Iy2pLpIcWUSyImSwoAoGCCqGSM49 -AwEHoUQDQgAEYD54V/vp+54P9DXarYqx4MPcm+HKRIQzNasYSoRQHQ/6S6Ps8tpM -cT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== ------END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem deleted file mode 100644 index 7191361e..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYD54V/vp+54P9DXarYqx4MPcm+HK -RIQzNasYSoRQHQ/6S6Ps8tpMcT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== ------END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem deleted file mode 100644 index a86c823e..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem +++ /dev/null @@ -1,6 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MIGkAgEBBDCaCvMHKhcG/qT7xsNLYnDT7sE/D+TtWIol1ROdaK1a564vx5pHbsRy -SEKcIxISi1igBwYFK4EEACKhZANiAATYa7rJaU7feLMqrAx6adZFNQOpaUH/Uylb -ZLriOLON5YFVwtVUpO1FfEXZUIQpptRPtc5ixIPY658yhBSb6irfIJUSP9aYTflJ -GKk/mDkK4t8mWBzhiD5B6jg9cEGhGgA= ------END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem deleted file mode 100644 index e80d0056..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN PUBLIC KEY----- -MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE2Gu6yWlO33izKqwMemnWRTUDqWlB/1Mp -W2S64jizjeWBVcLVVKTtRXxF2VCEKabUT7XOYsSD2OufMoQUm+oq3yCVEj/WmE35 -SRipP5g5CuLfJlgc4Yg+Qeo4PXBBoRoA ------END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem deleted file mode 100644 index 213afaf1..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem +++ /dev/null @@ -1,7 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MIHcAgEBBEIB0pE4uFaWRx7t03BsYlYvF1YvKaBGyvoakxnodm9ou0R9wC+sJAjH -QZZJikOg4SwNqgQ/hyrOuDK2oAVHhgVGcYmgBwYFK4EEACOhgYkDgYYABAAJXIuw -12MUzpHggia9POBFYXSxaOGKGbMjIyDI+6q7wi7LMw3HgbaOmgIqFG72o8JBQwYN -4IbXHf+f86CRY1AA2wHzbHvt6IhkCXTNxBEffa1yMUgu8n9cKKF2iLgyQKcKqW33 -8fGOw/n3Rm2Yd/EB56u2rnD29qS+nOM9eGS+gy39OQ== ------END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem deleted file mode 100644 index 02ea0220..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem +++ /dev/null @@ -1,6 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQACVyLsNdjFM6R4IImvTzgRWF0sWjh -ihmzIyMgyPuqu8IuyzMNx4G2jpoCKhRu9qPCQUMGDeCG1x3/n/OgkWNQANsB82x7 -7eiIZAl0zcQRH32tcjFILvJ/XCihdoi4MkCnCqlt9/HxjsP590ZtmHfxAeertq5w -9vakvpzjPXhkvoMt/Tk= ------END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/helpers.go b/vendor/github.com/dgrijalva/jwt-go/test/helpers.go deleted file mode 100644 index f84c3ef6..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package test - -import ( - "crypto/rsa" - "github.com/dgrijalva/jwt-go" - "io/ioutil" -) - -func LoadRSAPrivateKeyFromDisk(location string) *rsa.PrivateKey { - keyData, e := ioutil.ReadFile(location) - if e != nil 
{ - panic(e.Error()) - } - key, e := jwt.ParseRSAPrivateKeyFromPEM(keyData) - if e != nil { - panic(e.Error()) - } - return key -} - -func LoadRSAPublicKeyFromDisk(location string) *rsa.PublicKey { - keyData, e := ioutil.ReadFile(location) - if e != nil { - panic(e.Error()) - } - key, e := jwt.ParseRSAPublicKeyFromPEM(keyData) - if e != nil { - panic(e.Error()) - } - return key -} - -func MakeSampleToken(c jwt.Claims, key interface{}) string { - token := jwt.NewWithClaims(jwt.SigningMethodRS256, c) - s, e := token.SignedString(key) - - if e != nil { - panic(e.Error()) - } - - return s -} diff --git a/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey b/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey deleted file mode 100644 index 435b8ddb..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey +++ /dev/null @@ -1 +0,0 @@ -#5K+¥¼ƒ~ew{¦Z³(æðTÉ(©„²ÒP.¿ÓûZ’ÒGï–Š´Ãwb="=.!r.OÀÍšõgЀ£ \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/test/sample_key b/vendor/github.com/dgrijalva/jwt-go/test/sample_key deleted file mode 100644 index abdbade3..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/sample_key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA4f5wg5l2hKsTeNem/V41fGnJm6gOdrj8ym3rFkEU/wT8RDtn -SgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7mCpz9Er5qLaMXJwZxzHzAahlfA0i -cqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBpHssPnpYGIn20ZZuNlX2BrClciHhC -PUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2XrHhR+1DcKJzQBSTAGnpYVaqpsAR -ap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3bODIRe1AuTyHceAbewn8b462yEWKA -Rdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy7wIDAQABAoIBAQCwia1k7+2oZ2d3 -n6agCAbqIE1QXfCmh41ZqJHbOY3oRQG3X1wpcGH4Gk+O+zDVTV2JszdcOt7E5dAy -MaomETAhRxB7hlIOnEN7WKm+dGNrKRvV0wDU5ReFMRHg31/Lnu8c+5BvGjZX+ky9 -POIhFFYJqwCRlopGSUIxmVj5rSgtzk3iWOQXr+ah1bjEXvlxDOWkHN6YfpV5ThdE -KdBIPGEVqa63r9n2h+qazKrtiRqJqGnOrHzOECYbRFYhexsNFz7YT02xdfSHn7gM -IvabDDP/Qp0PjE1jdouiMaFHYnLBbgvlnZW9yuVf/rpXTUq/njxIXMmvmEyyvSDn -FcFikB8pAoGBAPF77hK4m3/rdGT7X8a/gwvZ2R121aBcdPwEaUhvj/36dx596zvY -mEOjrWfZhF083/nYWE2kVquj2wjs+otCLfifEEgXcVPTnEOPO9Zg3uNSL0nNQghj -FuD3iGLTUBCtM66oTe0jLSslHe8gLGEQqyMzHOzYxNqibxcOZIe8Qt0NAoGBAO+U -I5+XWjWEgDmvyC3TrOSf/KCGjtu0TSv30ipv27bDLMrpvPmD/5lpptTFwcxvVhCs -2b+chCjlghFSWFbBULBrfci2FtliClOVMYrlNBdUSJhf3aYSG2Doe6Bgt1n2CpNn -/iu37Y3NfemZBJA7hNl4dYe+f+uzM87cdQ214+jrAoGAXA0XxX8ll2+ToOLJsaNT -OvNB9h9Uc5qK5X5w+7G7O998BN2PC/MWp8H+2fVqpXgNENpNXttkRm1hk1dych86 -EunfdPuqsX+as44oCyJGFHVBnWpm33eWQw9YqANRI+pCJzP08I5WK3osnPiwshd+ -hR54yjgfYhBFNI7B95PmEQkCgYBzFSz7h1+s34Ycr8SvxsOBWxymG5zaCsUbPsL0 -4aCgLScCHb9J+E86aVbbVFdglYa5Id7DPTL61ixhl7WZjujspeXZGSbmq0Kcnckb -mDgqkLECiOJW2NHP/j0McAkDLL4tysF8TLDO8gvuvzNC+WQ6drO2ThrypLVZQ+ry -eBIPmwKBgEZxhqa0gVvHQG/7Od69KWj4eJP28kq13RhKay8JOoN0vPmspXJo1HY3 -CKuHRG+AP579dncdUnOMvfXOtkdM4vk0+hWASBQzM9xzVcztCa+koAugjVaLS9A+ -9uQoqEeVNTckxx0S2bYevRy7hGQmUJTyQm3j1zEUR5jpdbL83Fbq ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub b/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub deleted file mode 100644 index 03dc982a..00000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4f5wg5l2hKsTeNem/V41 -fGnJm6gOdrj8ym3rFkEU/wT8RDtnSgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7 -mCpz9Er5qLaMXJwZxzHzAahlfA0icqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBp -HssPnpYGIn20ZZuNlX2BrClciHhCPUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2 -XrHhR+1DcKJzQBSTAGnpYVaqpsARap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3b 
-ODIRe1AuTyHceAbewn8b462yEWKARdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy -7wIDAQAB ------END PUBLIC KEY----- diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore new file mode 100644 index 00000000..47bb0de4 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/.gitignore @@ -0,0 +1,36 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*.msg +*.lok + +samples/trivial +samples/trivial2 +samples/sample +samples/reconnect +samples/ssl +samples/custom_store +samples/simple +samples/stdinpub +samples/stdoutsub +samples/routing \ No newline at end of file diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md new file mode 100644 index 00000000..9791dc60 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/CONTRIBUTING.md @@ -0,0 +1,56 @@ +Contributing to Paho +==================== + +Thanks for your interest in this project. + +Project description: +-------------------- + +The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT). +Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community. + +- https://projects.eclipse.org/projects/technology.paho + +Developer resources: +-------------------- + +Information regarding source code management, builds, coding standards, and more. + +- https://projects.eclipse.org/projects/technology.paho/developer + +Contributor License Agreement: +------------------------------ + +Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA). + +- http://www.eclipse.org/legal/CLA.php + +Contributing Code: +------------------ + +The Go client is developed in Github, see their documentation on the process of forking and pull requests; https://help.github.com/categories/collaborating-on-projects-using-pull-requests/ + +Git commit messages should follow the style described here; + +http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html + +Contact: +-------- + +Contact the project developers via the project's "dev" list. + +- https://dev.eclipse.org/mailman/listinfo/paho-dev + +Search for bugs: +---------------- + +This project uses Github issues to track ongoing development and issues. + +- https://github.com/eclipse/paho.mqtt.golang/issues + +Create a new bug: +----------------- + +Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome! 
+ +- https://github.com/eclipse/paho.mqtt.golang/issues diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION new file mode 100644 index 00000000..34e49731 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/DISTRIBUTION @@ -0,0 +1,15 @@ + + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE new file mode 100644 index 00000000..aa7cc810 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/LICENSE @@ -0,0 +1,87 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. 
+ +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. 
+ +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. 
DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. 
\ No newline at end of file
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/README.md b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
new file mode 100644
index 00000000..81c7148e
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/README.md
@@ -0,0 +1,67 @@
+
+[![GoDoc](https://godoc.org/github.com/eclipse/paho.mqtt.golang?status.svg)](https://godoc.org/github.com/eclipse/paho.mqtt.golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/eclipse/paho.mqtt.golang)](https://goreportcard.com/report/github.com/eclipse/paho.mqtt.golang)
+
+Eclipse Paho MQTT Go client
+===========================
+
+
+This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library.
+
+This code builds a library which enables applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages.
+
+This library supports a fully asynchronous mode of operation.
+
+
+Installation and Build
+----------------------
+
+This client is designed to work with the standard Go tools, so installation is as easy as:
+
+```
+go get github.com/eclipse/paho.mqtt.golang
+```
+
+The client depends on Google's [websockets](https://godoc.org/golang.org/x/net/websocket) and [proxy](https://godoc.org/golang.org/x/net/proxy) packages,
+also easily installed with the commands:
+
+```
+go get golang.org/x/net/websocket
+go get golang.org/x/net/proxy
+```
+
+
+Usage and API
+-------------
+
+Detailed API documentation is available by using the godoc tool, or can be browsed online
+using the [godoc.org](http://godoc.org/github.com/eclipse/paho.mqtt.golang) service.
+
+Make use of the library by importing it in your Go client source code. For example,
+```
+import "github.com/eclipse/paho.mqtt.golang"
+```
+
+Samples are available in the `cmd` directory for reference.
+
+
+Runtime tracing
+---------------
+
+Tracing is enabled by assigning loggers (from the Go log package) to the logging endpoints ERROR, CRITICAL, WARN and DEBUG.
+
+
+Reporting bugs
+--------------
+
+Please report bugs by raising issues for this project on GitHub: https://github.com/eclipse/paho.mqtt.golang/issues
+
+
+More information
+----------------
+
+Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
+
+General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
+
+There is much more information available via the [MQTT community site](http://mqtt.org).
diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/about.html b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
new file mode 100644
index 00000000..b183f417
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.mqtt.golang/about.html
@@ -0,0 +1,41 @@
+
+
+
+About
+
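
The vendored README above only covers installation and import. For orientation, here is a minimal editorial sketch (not part of the upstream file) of the connect-and-publish flow its Usage section refers to; the broker address `tcp://localhost:1883`, the client ID, and the topic name are placeholder assumptions.

```go
package main

import (
	"fmt"
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// Placeholder broker address and client ID, assumed for illustration only.
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883").
		SetClientID("example-client")

	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		log.Fatalf("connect failed: %v", token.Error())
	}

	// Publish at QoS 1; Wait blocks until the broker has acknowledged the packet.
	token := client.Publish("example/topic", 1, false, "hello")
	token.Wait()
	if token.Error() != nil {
		log.Fatalf("publish failed: %v", token.Error())
	}
	fmt.Println("published")

	// Give in-flight work up to 250 ms to finish before closing the connection.
	client.Disconnect(250)
}
```

The Token returned by Connect and Publish reflects the asynchronous style the README mentions: network operations return immediately, and Wait/Error are used when the caller needs the outcome.
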

About This Content

+ +

December 9, 2013

+

License

+ +

The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise +indicated below, the Content is provided to you under the terms and conditions of the +Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL"). +A copy of the EPL is available at +http://www.eclipse.org/legal/epl-v10.html +and a copy of the EDL is available at +http://www.eclipse.org/org/documents/edl-v10.php. +For purposes of the EPL, "Program" will mean the Content.

+ +

If you did not receive this Content directly from the Eclipse Foundation, the Content is +being redistributed by another party ("Redistributor") and different terms and conditions may +apply to your use of any object code in the Content. Check the Redistributor's license that was +provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise +indicated below, the terms and conditions of the EPL still apply to any source code in the Content +and such source code may be obtained at http://www.eclipse.org.

+ + +

Third Party Content

+

The Content includes items that have been sourced from third parties as set out below. If you + did not receive this Content directly from the Eclipse Foundation, the following is provided + for informational purposes only, and you should look to the Redistributor's license for + terms and conditions of use.

+

+ None

+

+

+ + + + diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/client.go b/vendor/github.com/eclipse/paho.mqtt.golang/client.go new file mode 100644 index 00000000..24d56c1f --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/client.go @@ -0,0 +1,759 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +// Portions copyright © 2018 TIBCO Software Inc. + +// Package mqtt provides an MQTT v3.1.1 client library. +package mqtt + +import ( + "errors" + "fmt" + "net" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +const ( + disconnected uint32 = iota + connecting + reconnecting + connected +) + +// Client is the interface definition for a Client as used by this +// library, the interface is primarily to allow mocking tests. +// +// It is an MQTT v3.1.1 client for communicating +// with an MQTT server using non-blocking methods that allow work +// to be done in the background. +// An application may connect to an MQTT server using: +// A plain TCP socket +// A secure SSL/TLS socket +// A websocket +// To enable ensured message delivery at Quality of Service (QoS) levels +// described in the MQTT spec, a message persistence mechanism must be +// used. This is done by providing a type which implements the Store +// interface. For convenience, FileStore and MemoryStore are provided +// implementations that should be sufficient for most use cases. More +// information can be found in their respective documentation. +// Numerous connection options may be specified by configuring a +// and then supplying a ClientOptions type. +type Client interface { + // IsConnected returns a bool signifying whether + // the client is connected or not. + IsConnected() bool + // IsConnectionOpen return a bool signifying wether the client has an active + // connection to mqtt broker, i.e not in disconnected or reconnect mode + IsConnectionOpen() bool + // Connect will create a connection to the message broker, by default + // it will attempt to connect at v3.1.1 and auto retry at v3.1 if that + // fails + Connect() Token + // Disconnect will end the connection with the server, but not before waiting + // the specified number of milliseconds to wait for existing work to be + // completed. + Disconnect(quiesce uint) + // Publish will publish a message with the specified QoS and content + // to the specified topic. + // Returns a token to track delivery of the message to the broker + Publish(topic string, qos byte, retained bool, payload interface{}) Token + // Subscribe starts a new subscription. Provide a MessageHandler to be executed when + // a message is published on the topic provided, or nil for the default handler + Subscribe(topic string, qos byte, callback MessageHandler) Token + // SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to + // be executed when a message is published on one of the topics provided, or nil for the + // default handler + SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token + // Unsubscribe will end the subscription from each of the topics provided. 
+ // Messages published to those topics from other clients will no longer be + // received. + Unsubscribe(topics ...string) Token + // AddRoute allows you to add a handler for messages on a specific topic + // without making a subscription. For example having a different handler + // for parts of a wildcard subscription + AddRoute(topic string, callback MessageHandler) + // OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions + // in use by the client. + OptionsReader() ClientOptionsReader +} + +// client implements the Client interface +type client struct { + lastSent atomic.Value + lastReceived atomic.Value + pingOutstanding int32 + status uint32 + sync.RWMutex + messageIds + conn net.Conn + ibound chan packets.ControlPacket + obound chan *PacketAndToken + oboundP chan *PacketAndToken + msgRouter *router + stopRouter chan bool + incomingPubChan chan *packets.PublishPacket + errors chan error + stop chan struct{} + persist Store + options ClientOptions + workers sync.WaitGroup +} + +// NewClient will create an MQTT v3.1.1 client with all of the options specified +// in the provided ClientOptions. The client must have the Connect method called +// on it before it may be used. This is to make sure resources (such as a net +// connection) are created before the application is actually ready. +func NewClient(o *ClientOptions) Client { + c := &client{} + c.options = *o + + if c.options.Store == nil { + c.options.Store = NewMemoryStore() + } + switch c.options.ProtocolVersion { + case 3, 4: + c.options.protocolVersionExplicit = true + case 0x83, 0x84: + c.options.protocolVersionExplicit = true + default: + c.options.ProtocolVersion = 4 + c.options.protocolVersionExplicit = false + } + c.persist = c.options.Store + c.status = disconnected + c.messageIds = messageIds{index: make(map[uint16]tokenCompletor)} + c.msgRouter, c.stopRouter = newRouter() + c.msgRouter.setDefaultHandler(c.options.DefaultPublishHandler) + if !c.options.AutoReconnect { + c.options.MessageChannelDepth = 0 + } + return c +} + +// AddRoute allows you to add a handler for messages on a specific topic +// without making a subscription. For example having a different handler +// for parts of a wildcard subscription +func (c *client) AddRoute(topic string, callback MessageHandler) { + if callback != nil { + c.msgRouter.addRoute(topic, callback) + } +} + +// IsConnected returns a bool signifying whether +// the client is connected or not. 
+func (c *client) IsConnected() bool { + c.RLock() + defer c.RUnlock() + status := atomic.LoadUint32(&c.status) + switch { + case status == connected: + return true + case c.options.AutoReconnect && status > connecting: + return true + default: + return false + } +} + +// IsConnectionOpen return a bool signifying whether the client has an active +// connection to mqtt broker, i.e not in disconnected or reconnect mode +func (c *client) IsConnectionOpen() bool { + c.RLock() + defer c.RUnlock() + status := atomic.LoadUint32(&c.status) + switch { + case status == connected: + return true + default: + return false + } +} + +func (c *client) connectionStatus() uint32 { + c.RLock() + defer c.RUnlock() + status := atomic.LoadUint32(&c.status) + return status +} + +func (c *client) setConnected(status uint32) { + c.Lock() + defer c.Unlock() + atomic.StoreUint32(&c.status, uint32(status)) +} + +//ErrNotConnected is the error returned from function calls that are +//made when the client is not connected to a broker +var ErrNotConnected = errors.New("Not Connected") + +// Connect will create a connection to the message broker, by default +// it will attempt to connect at v3.1.1 and auto retry at v3.1 if that +// fails +func (c *client) Connect() Token { + var err error + t := newToken(packets.Connect).(*ConnectToken) + DEBUG.Println(CLI, "Connect()") + + c.obound = make(chan *PacketAndToken, c.options.MessageChannelDepth) + c.oboundP = make(chan *PacketAndToken, c.options.MessageChannelDepth) + c.ibound = make(chan packets.ControlPacket) + + go func() { + c.persist.Open() + + c.setConnected(connecting) + c.errors = make(chan error, 1) + c.stop = make(chan struct{}) + + var rc byte + protocolVersion := c.options.ProtocolVersion + + if len(c.options.Servers) == 0 { + t.setError(fmt.Errorf("No servers defined to connect to")) + return + } + + for _, broker := range c.options.Servers { + cm := newConnectMsgFromOptions(&c.options, broker) + c.options.ProtocolVersion = protocolVersion + CONN: + DEBUG.Println(CLI, "about to write new connect msg") + c.conn, err = openConnection(broker, c.options.TLSConfig, c.options.ConnectTimeout, c.options.HTTPHeaders) + if err == nil { + DEBUG.Println(CLI, "socket connected to broker") + switch c.options.ProtocolVersion { + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + case 0x83: + DEBUG.Println(CLI, "Using MQTT 3.1b protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 0x83 + case 0x84: + DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol") + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 0x84 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + c.options.ProtocolVersion = 4 + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc, t.sessionPresent = c.connect() + if rc != packets.Accepted { + if c.conn != nil { + c.conn.Close() + c.conn = nil + } + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc]) + continue + } + if c.options.ProtocolVersion == 4 { + DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") + c.options.ProtocolVersion = 3 + goto CONN + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + + if c.conn == nil { + ERROR.Println(CLI, "Failed to connect to a 
broker") + c.setConnected(disconnected) + c.persist.Close() + t.returnCode = rc + if rc != packets.ErrNetworkError { + t.setError(packets.ConnErrors[rc]) + } else { + t.setError(fmt.Errorf("%s : %s", packets.ConnErrors[rc], err)) + } + return + } + + c.options.protocolVersionExplicit = true + + if c.options.KeepAlive != 0 { + atomic.StoreInt32(&c.pingOutstanding, 0) + c.lastReceived.Store(time.Now()) + c.lastSent.Store(time.Now()) + c.workers.Add(1) + go keepalive(c) + } + + c.incomingPubChan = make(chan *packets.PublishPacket, c.options.MessageChannelDepth) + c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c) + + c.setConnected(connected) + DEBUG.Println(CLI, "client is connected") + if c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + c.workers.Add(4) + go errorWatch(c) + go alllogic(c) + go outgoing(c) + go incoming(c) + + // Take care of any messages in the store + if c.options.CleanSession == false { + c.resume(c.options.ResumeSubs) + } else { + c.persist.Reset() + } + + DEBUG.Println(CLI, "exit startClient") + t.flowComplete() + }() + return t +} + +// internal function used to reconnect the client when it loses its connection +func (c *client) reconnect() { + DEBUG.Println(CLI, "enter reconnect") + var ( + err error + + rc = byte(1) + sleep = time.Duration(1 * time.Second) + ) + + for rc != 0 && atomic.LoadUint32(&c.status) != disconnected { + for _, broker := range c.options.Servers { + cm := newConnectMsgFromOptions(&c.options, broker) + DEBUG.Println(CLI, "about to write new connect msg") + c.Lock() + c.conn, err = openConnection(broker, c.options.TLSConfig, c.options.ConnectTimeout, c.options.HTTPHeaders) + c.Unlock() + if err == nil { + DEBUG.Println(CLI, "socket connected to broker") + switch c.options.ProtocolVersion { + case 0x83: + DEBUG.Println(CLI, "Using MQTT 3.1b protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 0x83 + case 0x84: + DEBUG.Println(CLI, "Using MQTT 3.1.1b protocol") + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 0x84 + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc, _ = c.connect() + if rc != packets.Accepted { + c.conn.Close() + c.conn = nil + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc]) + continue + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + if rc != 0 { + DEBUG.Println(CLI, "Reconnect failed, sleeping for", int(sleep.Seconds()), "seconds") + time.Sleep(sleep) + if sleep < c.options.MaxReconnectInterval { + sleep *= 2 + } + + if sleep > c.options.MaxReconnectInterval { + sleep = c.options.MaxReconnectInterval + } + } + } + // Disconnect() must have been called while we were trying to reconnect. 
+ if c.connectionStatus() == disconnected { + DEBUG.Println(CLI, "Client moved to disconnected state while reconnecting, abandoning reconnect") + return + } + + c.stop = make(chan struct{}) + + if c.options.KeepAlive != 0 { + atomic.StoreInt32(&c.pingOutstanding, 0) + c.lastReceived.Store(time.Now()) + c.lastSent.Store(time.Now()) + c.workers.Add(1) + go keepalive(c) + } + + c.setConnected(connected) + DEBUG.Println(CLI, "client is reconnected") + if c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + c.workers.Add(4) + go errorWatch(c) + go alllogic(c) + go outgoing(c) + go incoming(c) + + c.resume(false) +} + +// This function is only used for receiving a connack +// when the connection is first started. +// This prevents receiving incoming data while resume +// is in progress if clean session is false. +func (c *client) connect() (byte, bool) { + DEBUG.Println(NET, "connect started") + + ca, err := packets.ReadPacket(c.conn) + if err != nil { + ERROR.Println(NET, "connect got error", err) + return packets.ErrNetworkError, false + } + if ca == nil { + ERROR.Println(NET, "received nil packet") + return packets.ErrNetworkError, false + } + + msg, ok := ca.(*packets.ConnackPacket) + if !ok { + ERROR.Println(NET, "received msg that was not CONNACK") + return packets.ErrNetworkError, false + } + + DEBUG.Println(NET, "received connack") + return msg.ReturnCode, msg.SessionPresent +} + +// Disconnect will end the connection with the server, but not before waiting +// the specified number of milliseconds to wait for existing work to be +// completed. +func (c *client) Disconnect(quiesce uint) { + status := atomic.LoadUint32(&c.status) + if status == connected { + DEBUG.Println(CLI, "disconnecting") + c.setConnected(disconnected) + + dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) + dt := newToken(packets.Disconnect) + c.oboundP <- &PacketAndToken{p: dm, t: dt} + + // wait for work to finish, or quiesce time consumed + dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond) + } else { + WARN.Println(CLI, "Disconnect() called but not connected (disconnected/reconnecting)") + c.setConnected(disconnected) + } + + c.disconnect() +} + +// ForceDisconnect will end the connection with the mqtt broker immediately. 
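
The Connect and reconnect logic above is driven by several ClientOptions: the keepalive interval, auto-reconnect with a capped exponential backoff, and the connection-lost/on-connect callbacks. As a hedged illustration of wiring those options up from application code (the broker address, client ID, and intervals are assumptions, not values used by this repository):

```go
package main

import (
	"log"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883"). // assumed broker address
		SetClientID("example-client").     // assumed client ID
		SetKeepAlive(30 * time.Second).    // ping interval used by the keepalive worker
		SetAutoReconnect(true).            // a lost connection triggers the reconnect loop
		SetMaxReconnectInterval(1 * time.Minute). // cap for the reconnect backoff
		SetConnectionLostHandler(func(_ mqtt.Client, err error) {
			log.Printf("connection lost: %v", err)
		}).
		SetOnConnectHandler(func(_ mqtt.Client) {
			log.Println("(re)connected")
		})

	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}
	defer client.Disconnect(250)
}
```
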
+func (c *client) forceDisconnect() { + if !c.IsConnected() { + WARN.Println(CLI, "already disconnected") + return + } + c.setConnected(disconnected) + c.conn.Close() + DEBUG.Println(CLI, "forcefully disconnecting") + c.disconnect() +} + +func (c *client) internalConnLost(err error) { + // Only do anything if this was called and we are still "connected" + // forceDisconnect can cause incoming/outgoing/alllogic to end with + // error from closing the socket but state will be "disconnected" + if c.IsConnected() { + c.closeStop() + c.conn.Close() + c.workers.Wait() + if c.options.CleanSession && !c.options.AutoReconnect { + c.messageIds.cleanUp() + } + if c.options.AutoReconnect { + c.setConnected(reconnecting) + go c.reconnect() + } else { + c.setConnected(disconnected) + } + if c.options.OnConnectionLost != nil { + go c.options.OnConnectionLost(c, err) + } + } +} + +func (c *client) closeStop() { + c.Lock() + defer c.Unlock() + select { + case <-c.stop: + DEBUG.Println("In disconnect and stop channel is already closed") + default: + if c.stop != nil { + close(c.stop) + } + } +} + +func (c *client) closeStopRouter() { + c.Lock() + defer c.Unlock() + select { + case <-c.stopRouter: + DEBUG.Println("In disconnect and stop channel is already closed") + default: + if c.stopRouter != nil { + close(c.stopRouter) + } + } +} + +func (c *client) closeConn() { + c.Lock() + defer c.Unlock() + if c.conn != nil { + c.conn.Close() + } +} + +func (c *client) disconnect() { + c.closeStop() + c.closeConn() + c.workers.Wait() + c.messageIds.cleanUp() + c.closeStopRouter() + DEBUG.Println(CLI, "disconnected") + c.persist.Close() +} + +// Publish will publish a message with the specified QoS and content +// to the specified topic. +// Returns a token to track delivery of the message to the broker +func (c *client) Publish(topic string, qos byte, retained bool, payload interface{}) Token { + token := newToken(packets.Publish).(*PublishToken) + DEBUG.Println(CLI, "enter Publish") + switch { + case !c.IsConnected(): + token.setError(ErrNotConnected) + return token + case c.connectionStatus() == reconnecting && qos == 0: + token.flowComplete() + return token + } + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = qos + pub.TopicName = topic + pub.Retain = retained + switch payload.(type) { + case string: + pub.Payload = []byte(payload.(string)) + case []byte: + pub.Payload = payload.([]byte) + default: + token.setError(fmt.Errorf("Unknown payload type")) + return token + } + + if pub.Qos != 0 && pub.MessageID == 0 { + pub.MessageID = c.getID(token) + token.messageID = pub.MessageID + } + persistOutbound(c.persist, pub) + if c.connectionStatus() == reconnecting { + DEBUG.Println(CLI, "storing publish message (reconnecting), topic:", topic) + } else { + DEBUG.Println(CLI, "sending publish message, topic:", topic) + c.obound <- &PacketAndToken{p: pub, t: token} + } + return token +} + +// Subscribe starts a new subscription. Provide a MessageHandler to be executed when +// a message is published on the topic provided. 
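
To make the Subscribe/MessageHandler contract described above concrete, here is a small editorial sketch (not part of the vendored source); the wildcard topic filter, QoS level, and broker address are assumptions.

```go
package main

import (
	"fmt"
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().AddBroker("tcp://localhost:1883") // assumed broker
	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}

	// The callback runs for every message matching the (wildcard) topic filter.
	handler := func(_ mqtt.Client, msg mqtt.Message) {
		fmt.Printf("message on %s: %s\n", msg.Topic(), msg.Payload())
	}

	if token := client.Subscribe("sensors/+/temperature", 1, handler); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}

	select {} // block forever; a real service would tie this to its own lifecycle
}
```
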
+func (c *client) Subscribe(topic string, qos byte, callback MessageHandler) Token { + token := newToken(packets.Subscribe).(*SubscribeToken) + DEBUG.Println(CLI, "enter Subscribe") + if !c.IsConnected() { + token.setError(ErrNotConnected) + return token + } + sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) + if err := validateTopicAndQos(topic, qos); err != nil { + token.setError(err) + return token + } + sub.Topics = append(sub.Topics, topic) + sub.Qoss = append(sub.Qoss, qos) + DEBUG.Println(CLI, sub.String()) + + if strings.HasPrefix(topic, "$share") { + topic = strings.Join(strings.Split(topic, "/")[2:], "/") + } + + if callback != nil { + c.msgRouter.addRoute(topic, callback) + } + + token.subs = append(token.subs, topic) + c.oboundP <- &PacketAndToken{p: sub, t: token} + DEBUG.Println(CLI, "exit Subscribe") + return token +} + +// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to +// be executed when a message is published on one of the topics provided. +func (c *client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token { + var err error + token := newToken(packets.Subscribe).(*SubscribeToken) + DEBUG.Println(CLI, "enter SubscribeMultiple") + if !c.IsConnected() { + token.setError(ErrNotConnected) + return token + } + sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) + if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil { + token.setError(err) + return token + } + + if callback != nil { + for topic := range filters { + c.msgRouter.addRoute(topic, callback) + } + } + token.subs = make([]string, len(sub.Topics)) + copy(token.subs, sub.Topics) + c.oboundP <- &PacketAndToken{p: sub, t: token} + DEBUG.Println(CLI, "exit SubscribeMultiple") + return token +} + +// Load all stored messages and resend them +// Call this to ensure QOS > 1,2 even after an application crash +func (c *client) resume(subscription bool) { + + storedKeys := c.persist.All() + for _, key := range storedKeys { + packet := c.persist.Get(key) + if packet == nil { + continue + } + details := packet.Details() + if isKeyOutbound(key) { + switch packet.(type) { + case *packets.SubscribePacket: + if subscription { + DEBUG.Println(STR, fmt.Sprintf("loaded pending subscribe (%d)", details.MessageID)) + token := newToken(packets.Subscribe).(*SubscribeToken) + c.oboundP <- &PacketAndToken{p: packet, t: token} + } + case *packets.UnsubscribePacket: + if subscription { + DEBUG.Println(STR, fmt.Sprintf("loaded pending unsubscribe (%d)", details.MessageID)) + token := newToken(packets.Unsubscribe).(*UnsubscribeToken) + c.oboundP <- &PacketAndToken{p: packet, t: token} + } + case *packets.PubrelPacket: + DEBUG.Println(STR, fmt.Sprintf("loaded pending pubrel (%d)", details.MessageID)) + select { + case c.oboundP <- &PacketAndToken{p: packet, t: nil}: + case <-c.stop: + } + case *packets.PublishPacket: + token := newToken(packets.Publish).(*PublishToken) + token.messageID = details.MessageID + c.claimID(token, details.MessageID) + DEBUG.Println(STR, fmt.Sprintf("loaded pending publish (%d)", details.MessageID)) + DEBUG.Println(STR, details) + c.obound <- &PacketAndToken{p: packet, t: token} + default: + ERROR.Println(STR, "invalid message type in store (discarded)") + c.persist.Del(key) + } + } else { + switch packet.(type) { + case *packets.PubrelPacket, *packets.PublishPacket: + DEBUG.Println(STR, fmt.Sprintf("loaded pending incomming (%d)", details.MessageID)) + select { + case c.ibound 
<- packet: + case <-c.stop: + } + default: + ERROR.Println(STR, "invalid message type in store (discarded)") + c.persist.Del(key) + } + } + } +} + +// Unsubscribe will end the subscription from each of the topics provided. +// Messages published to those topics from other clients will no longer be +// received. +func (c *client) Unsubscribe(topics ...string) Token { + token := newToken(packets.Unsubscribe).(*UnsubscribeToken) + DEBUG.Println(CLI, "enter Unsubscribe") + if !c.IsConnected() { + token.setError(ErrNotConnected) + return token + } + unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) + unsub.Topics = make([]string, len(topics)) + copy(unsub.Topics, topics) + + c.oboundP <- &PacketAndToken{p: unsub, t: token} + for _, topic := range topics { + c.msgRouter.deleteRoute(topic) + } + + DEBUG.Println(CLI, "exit Unsubscribe") + return token +} + +// OptionsReader returns a ClientOptionsReader which is a copy of the clientoptions +// in use by the client. +func (c *client) OptionsReader() ClientOptionsReader { + r := ClientOptionsReader{options: &c.options} + return r +} + +//DefaultConnectionLostHandler is a definition of a function that simply +//reports to the DEBUG log the reason for the client losing a connection. +func DefaultConnectionLostHandler(client Client, reason error) { + DEBUG.Println("Connection lost:", reason.Error()) +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/components.go b/vendor/github.com/eclipse/paho.mqtt.golang/components.go new file mode 100644 index 00000000..01f5fafd --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/components.go @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +type component string + +// Component names for debug output +const ( + NET component = "[net] " + PNG component = "[pinger] " + CLI component = "[client] " + DEC component = "[decode] " + MES component = "[message] " + STR component = "[store] " + MID component = "[msgids] " + TST component = "[test] " + STA component = "[state] " + ERR component = "[error] " +) diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 new file mode 100644 index 00000000..cf989f14 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/edl-v10 @@ -0,0 +1,15 @@ + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 new file mode 100644 index 00000000..79e486c3 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/epl-v10 @@ -0,0 +1,70 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and +b) in the case of each subsequent Contributor: +i) changes to the Program, and +ii) additions to the Program; +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. 
+c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and +b) its license agreement: +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and +b) a copy of this Agreement must be included with each copy of the Program. +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. 
The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. 
If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go new file mode 100644 index 00000000..c4a0d36b --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/filestore.go @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "io/ioutil" + "os" + "path" + "sort" + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +const ( + msgExt = ".msg" + tmpExt = ".tmp" + corruptExt = ".CORRUPT" +) + +// FileStore implements the store interface using the filesystem to provide +// true persistence, even across client failure. This is designed to use a +// single directory per running client. If you are running multiple clients +// on the same filesystem, you will need to be careful to specify unique +// store directories for each. +type FileStore struct { + sync.RWMutex + directory string + opened bool +} + +// NewFileStore will create a new FileStore which stores its messages in the +// directory provided. +func NewFileStore(directory string) *FileStore { + store := &FileStore{ + directory: directory, + opened: false, + } + return store +} + +// Open will allow the FileStore to be used. 
+func (store *FileStore) Open() { + store.Lock() + defer store.Unlock() + // if no store directory was specified in ClientOpts, by default use the + // current working directory + if store.directory == "" { + store.directory, _ = os.Getwd() + } + + // if store dir exists, great, otherwise, create it + if !exists(store.directory) { + perms := os.FileMode(0770) + merr := os.MkdirAll(store.directory, perms) + chkerr(merr) + } + store.opened = true + DEBUG.Println(STR, "store is opened at", store.directory) +} + +// Close will disallow the FileStore from being used. +func (store *FileStore) Close() { + store.Lock() + defer store.Unlock() + store.opened = false + DEBUG.Println(STR, "store is closed") +} + +// Put will put a message into the store, associated with the provided +// key value. +func (store *FileStore) Put(key string, m packets.ControlPacket) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return + } + full := fullpath(store.directory, key) + write(store.directory, key, m) + if !exists(full) { + ERROR.Println(STR, "file not created:", full) + } +} + +// Get will retrieve a message from the store, the one associated with +// the provided key value. +func (store *FileStore) Get(key string) packets.ControlPacket { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return nil + } + filepath := fullpath(store.directory, key) + if !exists(filepath) { + return nil + } + mfile, oerr := os.Open(filepath) + chkerr(oerr) + msg, rerr := packets.ReadPacket(mfile) + chkerr(mfile.Close()) + + // Message was unreadable, return nil + if rerr != nil { + newpath := corruptpath(store.directory, key) + WARN.Println(STR, "corrupted file detected:", rerr.Error(), "archived at:", newpath) + os.Rename(filepath, newpath) + return nil + } + return msg +} + +// All will provide a list of all of the keys associated with messages +// currenly residing in the FileStore. +func (store *FileStore) All() []string { + store.RLock() + defer store.RUnlock() + return store.all() +} + +// Del will remove the persisted message associated with the provided +// key from the FileStore. +func (store *FileStore) Del(key string) { + store.Lock() + defer store.Unlock() + store.del(key) +} + +// Reset will remove all persisted messages from the FileStore. 
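
FileStore is normally not used directly; it is handed to the client through ClientOptions so that in-flight QoS 1/2 packets survive a restart and can be resumed. A minimal sketch, assuming a writable store directory and a local broker (both placeholders, not paths used by this repository):

```go
package main

import (
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// Assumed directory; each running client needs its own store directory.
	store := mqtt.NewFileStore("/var/lib/example-client/mqtt-store")

	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883"). // assumed broker
		SetClientID("example-client").     // assumed client ID
		SetCleanSession(false).            // keep session state so stored messages are resumed
		SetStore(store)                    // persist in-flight QoS 1/2 packets on disk

	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}
	defer client.Disconnect(250)
}
```
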
+func (store *FileStore) Reset() { + store.Lock() + defer store.Unlock() + WARN.Println(STR, "FileStore Reset") + for _, key := range store.all() { + store.del(key) + } +} + +// lockless +func (store *FileStore) all() []string { + var err error + var keys []string + var files fileInfos + + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return nil + } + + files, err = ioutil.ReadDir(store.directory) + chkerr(err) + sort.Sort(files) + for _, f := range files { + DEBUG.Println(STR, "file in All():", f.Name()) + name := f.Name() + if name[len(name)-4:len(name)] != msgExt { + DEBUG.Println(STR, "skipping file, doesn't have right extension: ", name) + continue + } + key := name[0 : len(name)-4] // remove file extension + keys = append(keys, key) + } + return keys +} + +// lockless +func (store *FileStore) del(key string) { + if !store.opened { + ERROR.Println(STR, "Trying to use file store, but not open") + return + } + DEBUG.Println(STR, "store del filepath:", store.directory) + DEBUG.Println(STR, "store delete key:", key) + filepath := fullpath(store.directory, key) + DEBUG.Println(STR, "path of deletion:", filepath) + if !exists(filepath) { + WARN.Println(STR, "store could not delete key:", key) + return + } + rerr := os.Remove(filepath) + chkerr(rerr) + DEBUG.Println(STR, "del msg:", key) + if exists(filepath) { + ERROR.Println(STR, "file not deleted:", filepath) + } +} + +func fullpath(store string, key string) string { + p := path.Join(store, key+msgExt) + return p +} + +func tmppath(store string, key string) string { + p := path.Join(store, key+tmpExt) + return p +} + +func corruptpath(store string, key string) string { + p := path.Join(store, key+corruptExt) + return p +} + +// create file called "X.[messageid].tmp" located in the store +// the contents of the file is the bytes of the message, then +// rename it to "X.[messageid].msg", overwriting any existing +// message with the same id +// X will be 'i' for inbound messages, and O for outbound messages +func write(store, key string, m packets.ControlPacket) { + temppath := tmppath(store, key) + f, err := os.Create(temppath) + chkerr(err) + werr := m.Write(f) + chkerr(werr) + cerr := f.Close() + chkerr(cerr) + rerr := os.Rename(temppath, fullpath(store, key)) + chkerr(rerr) +} + +func exists(file string) bool { + if _, err := os.Stat(file); err != nil { + if os.IsNotExist(err) { + return false + } + chkerr(err) + } + return true +} + +type fileInfos []os.FileInfo + +func (f fileInfos) Len() int { + return len(f) +} + +func (f fileInfos) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +func (f fileInfos) Less(i, j int) bool { + return f[i].ModTime().Before(f[j].ModTime()) +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go new file mode 100644 index 00000000..499c490b --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/memstore.go @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// MemoryStore implements the store interface to provide a "persistence" +// mechanism wholly stored in memory. This is only useful for +// as long as the client instance exists. +type MemoryStore struct { + sync.RWMutex + messages map[string]packets.ControlPacket + opened bool +} + +// NewMemoryStore returns a pointer to a new instance of +// MemoryStore, the instance is not initialized and ready to +// use until Open() has been called on it. +func NewMemoryStore() *MemoryStore { + store := &MemoryStore{ + messages: make(map[string]packets.ControlPacket), + opened: false, + } + return store +} + +// Open initializes a MemoryStore instance. +func (store *MemoryStore) Open() { + store.Lock() + defer store.Unlock() + store.opened = true + DEBUG.Println(STR, "memorystore initialized") +} + +// Put takes a key and a pointer to a Message and stores the +// message. +func (store *MemoryStore) Put(key string, message packets.ControlPacket) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return + } + store.messages[key] = message +} + +// Get takes a key and looks in the store for a matching Message +// returning either the Message pointer or nil. +func (store *MemoryStore) Get(key string) packets.ControlPacket { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return nil + } + mid := mIDFromKey(key) + m := store.messages[key] + if m == nil { + CRITICAL.Println(STR, "memorystore get: message", mid, "not found") + } else { + DEBUG.Println(STR, "memorystore get: message", mid, "found") + } + return m +} + +// All returns a slice of strings containing all the keys currently +// in the MemoryStore. +func (store *MemoryStore) All() []string { + store.RLock() + defer store.RUnlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return nil + } + keys := []string{} + for k := range store.messages { + keys = append(keys, k) + } + return keys +} + +// Del takes a key, searches the MemoryStore and if the key is found +// deletes the Message pointer associated with it. +func (store *MemoryStore) Del(key string) { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to use memory store, but not open") + return + } + mid := mIDFromKey(key) + m := store.messages[key] + if m == nil { + WARN.Println(STR, "memorystore del: message", mid, "not found") + } else { + delete(store.messages, key) + DEBUG.Println(STR, "memorystore del: message", mid, "was deleted") + } +} + +// Close will disallow modifications to the state of the store. +func (store *MemoryStore) Close() { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to close memory store, but not open") + return + } + store.opened = false + DEBUG.Println(STR, "memorystore closed") +} + +// Reset eliminates all persisted message data in the store. 
+func (store *MemoryStore) Reset() { + store.Lock() + defer store.Unlock() + if !store.opened { + ERROR.Println(STR, "Trying to reset memory store, but not open") + } + store.messages = make(map[string]packets.ControlPacket) + WARN.Println(STR, "memorystore wiped") +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/message.go b/vendor/github.com/eclipse/paho.mqtt.golang/message.go new file mode 100644 index 00000000..903e5dcf --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/message.go @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "net/url" + + "github.com/eclipse/paho.mqtt.golang/packets" + "sync" +) + +// Message defines the externals that a message implementation must support +// these are received messages that are passed to the callbacks, not internal +// messages +type Message interface { + Duplicate() bool + Qos() byte + Retained() bool + Topic() string + MessageID() uint16 + Payload() []byte + Ack() +} + +type message struct { + duplicate bool + qos byte + retained bool + topic string + messageID uint16 + payload []byte + once sync.Once + ack func() +} + +func (m *message) Duplicate() bool { + return m.duplicate +} + +func (m *message) Qos() byte { + return m.qos +} + +func (m *message) Retained() bool { + return m.retained +} + +func (m *message) Topic() string { + return m.topic +} + +func (m *message) MessageID() uint16 { + return m.messageID +} + +func (m *message) Payload() []byte { + return m.payload +} + +func (m *message) Ack() { + m.once.Do(m.ack) +} + +func messageFromPublish(p *packets.PublishPacket, ack func()) Message { + return &message{ + duplicate: p.Dup, + qos: p.Qos, + retained: p.Retain, + topic: p.TopicName, + messageID: p.MessageID, + payload: p.Payload, + ack: ack, + } +} + +func newConnectMsgFromOptions(options *ClientOptions, broker *url.URL) *packets.ConnectPacket { + m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) + + m.CleanSession = options.CleanSession + m.WillFlag = options.WillEnabled + m.WillRetain = options.WillRetained + m.ClientIdentifier = options.ClientID + + if options.WillEnabled { + m.WillQos = options.WillQos + m.WillTopic = options.WillTopic + m.WillMessage = options.WillPayload + } + + username := options.Username + password := options.Password + if broker.User != nil { + username = broker.User.Username() + if pwd, ok := broker.User.Password(); ok { + password = pwd + } + } + if options.CredentialsProvider != nil { + username, password = options.CredentialsProvider() + } + + if username != "" { + m.UsernameFlag = true + m.Username = username + //mustn't have password without user as well + if password != "" { + m.PasswordFlag = true + m.Password = []byte(password) + } + } + + m.Keepalive = uint16(options.KeepAlive) + + return m +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go new file mode 100644 index 00000000..9a5fa9fd --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/messageids.go @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "sync" + "time" +) + +// MId is 16 bit message id as specified by the MQTT spec. +// In general, these values should not be depended upon by +// the client application. +type MId uint16 + +type messageIds struct { + sync.RWMutex + index map[uint16]tokenCompletor +} + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +func (mids *messageIds) cleanUp() { + mids.Lock() + for _, token := range mids.index { + switch token.(type) { + case *PublishToken: + token.setError(fmt.Errorf("Connection lost before Publish completed")) + case *SubscribeToken: + token.setError(fmt.Errorf("Connection lost before Subscribe completed")) + case *UnsubscribeToken: + token.setError(fmt.Errorf("Connection lost before Unsubscribe completed")) + case nil: + continue + } + token.flowComplete() + } + mids.index = make(map[uint16]tokenCompletor) + mids.Unlock() + DEBUG.Println(MID, "cleaned up") +} + +func (mids *messageIds) freeID(id uint16) { + mids.Lock() + delete(mids.index, id) + mids.Unlock() +} + +func (mids *messageIds) claimID(token tokenCompletor, id uint16) { + mids.Lock() + defer mids.Unlock() + if _, ok := mids.index[id]; !ok { + mids.index[id] = token + } else { + old := mids.index[id] + old.flowComplete() + mids.index[id] = token + } +} + +func (mids *messageIds) getID(t tokenCompletor) uint16 { + mids.Lock() + defer mids.Unlock() + for i := midMin; i < midMax; i++ { + if _, ok := mids.index[i]; !ok { + mids.index[i] = t + return i + } + } + return 0 +} + +func (mids *messageIds) getToken(id uint16) tokenCompletor { + mids.RLock() + defer mids.RUnlock() + if token, ok := mids.index[id]; ok { + return token + } + return &DummyToken{id: id} +} + +type DummyToken struct { + id uint16 +} + +func (d *DummyToken) Wait() bool { + return true +} + +func (d *DummyToken) WaitTimeout(t time.Duration) bool { + return true +} + +func (d *DummyToken) flowComplete() { + ERROR.Printf("A lookup for token %d returned nil\n", d.id) +} + +func (d *DummyToken) Error() error { + return nil +} + +func (d *DummyToken) setError(e error) {} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/net.go b/vendor/github.com/eclipse/paho.mqtt.golang/net.go new file mode 100644 index 00000000..3e6366be --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/net.go @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "os" + "reflect" + "sync/atomic" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" + "golang.org/x/net/proxy" + "golang.org/x/net/websocket" +) + +func signalError(c chan<- error, err error) { + select { + case c <- err: + default: + } +} + +func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration, headers http.Header) (net.Conn, error) { + switch uri.Scheme { + case "ws": + config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("http://%s", uri.Host)) + config.Protocol = []string{"mqtt"} + config.Header = headers + config.Dialer = &net.Dialer{Timeout: timeout} + conn, err := websocket.DialConfig(config) + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "wss": + config, _ := websocket.NewConfig(uri.String(), fmt.Sprintf("https://%s", uri.Host)) + config.Protocol = []string{"mqtt"} + config.TlsConfig = tlsc + config.Header = headers + config.Dialer = &net.Dialer{Timeout: timeout} + conn, err := websocket.DialConfig(config) + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "tcp": + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + conn, err := net.DialTimeout("tcp", uri.Host, timeout) + if err != nil { + return nil, err + } + return conn, nil + } + proxyDialer := proxy.FromEnvironment() + + conn, err := proxyDialer.Dial("tcp", uri.Host) + if err != nil { + return nil, err + } + return conn, nil + case "unix": + conn, err := net.DialTimeout("unix", uri.Host, timeout) + if err != nil { + return nil, err + } + return conn, nil + case "ssl": + fallthrough + case "tls": + fallthrough + case "tcps": + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc) + if err != nil { + return nil, err + } + return conn, nil + } + proxyDialer := proxy.FromEnvironment() + + conn, err := proxyDialer.Dial("tcp", uri.Host) + if err != nil { + return nil, err + } + + tlsConn := tls.Client(conn, tlsc) + + err = tlsConn.Handshake() + if err != nil { + conn.Close() + return nil, err + } + + return tlsConn, nil + } + return nil, errors.New("Unknown protocol") +} + +// actually read incoming messages off the wire +// send Message object into ibound channel +func incoming(c *client) { + var err error + var cp packets.ControlPacket + + defer c.workers.Done() + + DEBUG.Println(NET, "incoming started") + + for { + if cp, err = packets.ReadPacket(c.conn); err != nil { + break + } + DEBUG.Println(NET, "Received Message") + select { + case c.ibound <- cp: + // Notify keepalive logic that we recently received a packet + if c.options.KeepAlive != 0 { + c.lastReceived.Store(time.Now()) + } + case <-c.stop: + // This avoids a deadlock should a message arrive while shutting down. + // In that case the "reader" of c.ibound might already be gone + WARN.Println(NET, "incoming dropped a received message during shutdown") + break + } + } + // We received an error on read. 
+ // If disconnect is in progress, swallow error and return + select { + case <-c.stop: + DEBUG.Println(NET, "incoming stopped") + return + // Not trying to disconnect, send the error to the errors channel + default: + ERROR.Println(NET, "incoming stopped with error", err) + signalError(c.errors, err) + return + } +} + +// receive a Message object on obound, and then +// actually send outgoing message to the wire +func outgoing(c *client) { + defer c.workers.Done() + DEBUG.Println(NET, "outgoing started") + + for { + DEBUG.Println(NET, "outgoing waiting for an outbound message") + select { + case <-c.stop: + DEBUG.Println(NET, "outgoing stopped") + return + case pub := <-c.obound: + msg := pub.p.(*packets.PublishPacket) + + if c.options.WriteTimeout > 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout)) + } + + if err := msg.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error", err) + pub.t.setError(err) + signalError(c.errors, err) + return + } + + if c.options.WriteTimeout > 0 { + // If we successfully wrote, we don't want the timeout to happen during an idle period + // so we reset it to infinite. + c.conn.SetWriteDeadline(time.Time{}) + } + + if msg.Qos == 0 { + pub.t.flowComplete() + } + DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID) + case msg := <-c.oboundP: + switch msg.p.(type) { + case *packets.SubscribePacket: + msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t) + case *packets.UnsubscribePacket: + msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t) + } + DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p)) + if err := msg.p.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error", err) + if msg.t != nil { + msg.t.setError(err) + } + signalError(c.errors, err) + return + } + switch msg.p.(type) { + case *packets.DisconnectPacket: + msg.t.(*DisconnectToken).flowComplete() + DEBUG.Println(NET, "outbound wrote disconnect, stopping") + return + } + } + // Reset ping timer after sending control packet. 
+ if c.options.KeepAlive != 0 { + c.lastSent.Store(time.Now()) + } + } +} + +// receive Message objects on ibound +// store messages if necessary +// send replies on obound +// delete messages from store if necessary +func alllogic(c *client) { + defer c.workers.Done() + DEBUG.Println(NET, "logic started") + + for { + DEBUG.Println(NET, "logic waiting for msg on ibound") + + select { + case msg := <-c.ibound: + DEBUG.Println(NET, "logic got msg on ibound") + persistInbound(c.persist, msg) + switch m := msg.(type) { + case *packets.PingrespPacket: + DEBUG.Println(NET, "received pingresp") + atomic.StoreInt32(&c.pingOutstanding, 0) + case *packets.SubackPacket: + DEBUG.Println(NET, "received suback, id:", m.MessageID) + token := c.getToken(m.MessageID) + switch t := token.(type) { + case *SubscribeToken: + DEBUG.Println(NET, "granted qoss", m.ReturnCodes) + for i, qos := range m.ReturnCodes { + t.subResult[t.subs[i]] = qos + } + } + token.flowComplete() + c.freeID(m.MessageID) + case *packets.UnsubackPacket: + DEBUG.Println(NET, "received unsuback, id:", m.MessageID) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + case *packets.PublishPacket: + DEBUG.Println(NET, "received publish, msgId:", m.MessageID) + DEBUG.Println(NET, "putting msg on onPubChan") + switch m.Qos { + case 2: + c.incomingPubChan <- m + DEBUG.Println(NET, "done putting msg on incomingPubChan") + case 1: + c.incomingPubChan <- m + DEBUG.Println(NET, "done putting msg on incomingPubChan") + case 0: + select { + case c.incomingPubChan <- m: + case <-c.stop: + } + DEBUG.Println(NET, "done putting msg on incomingPubChan") + } + case *packets.PubackPacket: + DEBUG.Println(NET, "received puback, id:", m.MessageID) + // c.receipts.get(msg.MsgId()) <- Receipt{} + // c.receipts.end(msg.MsgId()) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + case *packets.PubrecPacket: + DEBUG.Println(NET, "received pubrec, id:", m.MessageID) + prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) + prel.MessageID = m.MessageID + select { + case c.oboundP <- &PacketAndToken{p: prel, t: nil}: + case <-c.stop: + } + case *packets.PubrelPacket: + DEBUG.Println(NET, "received pubrel, id:", m.MessageID) + pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) + pc.MessageID = m.MessageID + persistOutbound(c.persist, pc) + select { + case c.oboundP <- &PacketAndToken{p: pc, t: nil}: + case <-c.stop: + } + case *packets.PubcompPacket: + DEBUG.Println(NET, "received pubcomp, id:", m.MessageID) + c.getToken(m.MessageID).flowComplete() + c.freeID(m.MessageID) + } + case <-c.stop: + WARN.Println(NET, "logic stopped") + return + } + } +} + +func (c *client) ackFunc(packet *packets.PublishPacket) func() { + return func() { + switch packet.Qos { + case 2: + pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) + pr.MessageID = packet.MessageID + DEBUG.Println(NET, "putting pubrec msg on obound") + select { + case c.oboundP <- &PacketAndToken{p: pr, t: nil}: + case <-c.stop: + } + DEBUG.Println(NET, "done putting pubrec msg on obound") + case 1: + pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) + pa.MessageID = packet.MessageID + DEBUG.Println(NET, "putting puback msg on obound") + persistOutbound(c.persist, pa) + select { + case c.oboundP <- &PacketAndToken{p: pa, t: nil}: + case <-c.stop: + } + DEBUG.Println(NET, "done putting puback msg on obound") + case 0: + // do nothing, since there is no need to send an ack packet back + } + } +} + +func 
errorWatch(c *client) { + defer c.workers.Done() + select { + case <-c.stop: + WARN.Println(NET, "errorWatch stopped") + return + case err := <-c.errors: + ERROR.Println(NET, "error triggered, stopping") + go c.internalConnLost(err) + return + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/notice.html b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html new file mode 100644 index 00000000..f19c483b --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/notice.html @@ -0,0 +1,108 @@ + + + + + +Eclipse Foundation Software User Agreement + + + +

[notice.html body omitted: the file contains the standard Eclipse Foundation Software User Agreement (February 1, 2011), with sections on Usage Of Content, Applicable Licenses (EPL v1.0, http://www.eclipse.org/legal/epl-v10.html), Use of Provisioning Technology, and Cryptography, followed by the Java trademark notice.]
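The persistence stores added above (filestore.go and memstore.go) both satisfy the library's Store interface used for QoS 1/2 recovery. The sketch below is not part of the vendored sources; it simply exercises a MemoryStore directly. The "o.1" key follows the outbound "o." / inbound "i." naming hinted at in the filestore comments, and the topic and payload are invented for illustration.

package main

import (
	"fmt"

	mqtt "github.com/eclipse/paho.mqtt.golang"
	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	// MemoryStore keeps ControlPackets in a map for the lifetime of the client;
	// FileStore offers the same interface backed by .msg files on disk.
	store := mqtt.NewMemoryStore()
	store.Open()
	defer store.Close()

	// Build an outbound PUBLISH packet with the packets helpers shown in this patch.
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.TopicName = "sensors/temp" // illustrative topic
	pub.Payload = []byte("21.5")
	pub.MessageID = 1

	store.Put("o.1", pub)                    // key: "o." prefix + message id (assumed convention)
	fmt.Println("stored keys:", store.All()) // [o.1]

	if m := store.Get("o.1"); m != nil {
		fmt.Println("retrieved:", m.String())
	}
	store.Del("o.1")
}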

+ + diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/oops.go b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go new file mode 100644 index 00000000..39630d7f --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/oops.go @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +func chkerr(e error) { + if e != nil { + panic(e) + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options.go b/vendor/github.com/eclipse/paho.mqtt.golang/options.go new file mode 100644 index 00000000..e96e9ed7 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/options.go @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +// Portions copyright © 2018 TIBCO Software Inc. + +package mqtt + +import ( + "crypto/tls" + "net/http" + "net/url" + "strings" + "time" +) + +// CredentialsProvider allows the username and password to be updated +// before reconnecting. It should return the current username and password. +type CredentialsProvider func() (username string, password string) + +// MessageHandler is a callback type which can be set to be +// executed upon the arrival of messages published to topics +// to which the client is subscribed. +type MessageHandler func(Client, Message) + +// ConnectionLostHandler is a callback type which can be set to be +// executed upon an unintended disconnection from the MQTT broker. +// Disconnects caused by calling Disconnect or ForceDisconnect will +// not cause an OnConnectionLost callback to execute. +type ConnectionLostHandler func(Client, error) + +// OnConnectHandler is a callback that is called when the client +// state changes from unconnected/disconnected to connected. Both +// at initial connection and on reconnection +type OnConnectHandler func(Client) + +// ClientOptions contains configurable options for an Client. +type ClientOptions struct { + Servers []*url.URL + ClientID string + Username string + Password string + CredentialsProvider CredentialsProvider + CleanSession bool + Order bool + WillEnabled bool + WillTopic string + WillPayload []byte + WillQos byte + WillRetained bool + ProtocolVersion uint + protocolVersionExplicit bool + TLSConfig *tls.Config + KeepAlive int64 + PingTimeout time.Duration + ConnectTimeout time.Duration + MaxReconnectInterval time.Duration + AutoReconnect bool + Store Store + DefaultPublishHandler MessageHandler + OnConnect OnConnectHandler + OnConnectionLost ConnectionLostHandler + WriteTimeout time.Duration + MessageChannelDepth uint + ResumeSubs bool + HTTPHeaders http.Header +} + +// NewClientOptions will create a new ClientClientOptions type with some +// default values. 
+// Port: 1883 +// CleanSession: True +// Order: True +// KeepAlive: 30 (seconds) +// ConnectTimeout: 30 (seconds) +// MaxReconnectInterval 10 (minutes) +// AutoReconnect: True +func NewClientOptions() *ClientOptions { + o := &ClientOptions{ + Servers: nil, + ClientID: "", + Username: "", + Password: "", + CleanSession: true, + Order: true, + WillEnabled: false, + WillTopic: "", + WillPayload: nil, + WillQos: 0, + WillRetained: false, + ProtocolVersion: 0, + protocolVersionExplicit: false, + KeepAlive: 30, + PingTimeout: 10 * time.Second, + ConnectTimeout: 30 * time.Second, + MaxReconnectInterval: 10 * time.Minute, + AutoReconnect: true, + Store: nil, + OnConnect: nil, + OnConnectionLost: DefaultConnectionLostHandler, + WriteTimeout: 0, // 0 represents timeout disabled + MessageChannelDepth: 100, + ResumeSubs: false, + HTTPHeaders: make(map[string][]string), + } + return o +} + +// AddBroker adds a broker URI to the list of brokers to be used. The format should be +// scheme://host:port +// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname) +// and "port" is the port on which the broker is accepting connections. +// +// Default values for hostname is "127.0.0.1", for schema is "tcp://". +// +// An example broker URI would look like: tcp://foobar.com:1883 +func (o *ClientOptions) AddBroker(server string) *ClientOptions { + if len(server) > 0 && server[0] == ':' { + server = "127.0.0.1" + server + } + if !strings.Contains(server, "://") { + server = "tcp://" + server + } + brokerURI, err := url.Parse(server) + if err != nil { + ERROR.Println(CLI, "Failed to parse %q broker address: %s", server, err) + return o + } + o.Servers = append(o.Servers, brokerURI) + return o +} + +// SetResumeSubs will enable resuming of stored (un)subscribe messages when connecting +// but not reconnecting if CleanSession is false. Otherwise these messages are discarded. +func (o *ClientOptions) SetResumeSubs(resume bool) *ClientOptions { + o.ResumeSubs = resume + return o +} + +// SetClientID will set the client id to be used by this client when +// connecting to the MQTT broker. According to the MQTT v3.1 specification, +// a client id mus be no longer than 23 characters. +func (o *ClientOptions) SetClientID(id string) *ClientOptions { + o.ClientID = id + return o +} + +// SetUsername will set the username to be used by this client when connecting +// to the MQTT broker. Note: without the use of SSL/TLS, this information will +// be sent in plaintext accross the wire. +func (o *ClientOptions) SetUsername(u string) *ClientOptions { + o.Username = u + return o +} + +// SetPassword will set the password to be used by this client when connecting +// to the MQTT broker. Note: without the use of SSL/TLS, this information will +// be sent in plaintext accross the wire. +func (o *ClientOptions) SetPassword(p string) *ClientOptions { + o.Password = p + return o +} + +// SetCredentialsProvider will set a method to be called by this client when +// connecting to the MQTT broker that provide the current username and password. +// Note: without the use of SSL/TLS, this information will be sent +// in plaintext accross the wire. +func (o *ClientOptions) SetCredentialsProvider(p CredentialsProvider) *ClientOptions { + o.CredentialsProvider = p + return o +} + +// SetCleanSession will set the "clean session" flag in the connect message +// when this client connects to an MQTT broker. 
By setting this flag, you are +// indicating that no messages saved by the broker for this client should be +// delivered. Any messages that were going to be sent by this client before +// diconnecting previously but didn't will not be sent upon connecting to the +// broker. +func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions { + o.CleanSession = clean + return o +} + +// SetOrderMatters will set the message routing to guarantee order within +// each QoS level. By default, this value is true. If set to false, +// this flag indicates that messages can be delivered asynchronously +// from the client to the application and possibly arrive out of order. +func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions { + o.Order = order + return o +} + +// SetTLSConfig will set an SSL/TLS configuration to be used when connecting +// to an MQTT broker. Please read the official Go documentation for more +// information. +func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions { + o.TLSConfig = t + return o +} + +// SetStore will set the implementation of the Store interface +// used to provide message persistence in cases where QoS levels +// QoS_ONE or QoS_TWO are used. If no store is provided, then the +// client will use MemoryStore by default. +func (o *ClientOptions) SetStore(s Store) *ClientOptions { + o.Store = s + return o +} + +// SetKeepAlive will set the amount of time (in seconds) that the client +// should wait before sending a PING request to the broker. This will +// allow the client to know that a connection has not been lost with the +// server. +func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions { + o.KeepAlive = int64(k / time.Second) + return o +} + +// SetPingTimeout will set the amount of time (in seconds) that the client +// will wait after sending a PING request to the broker, before deciding +// that the connection has been lost. Default is 10 seconds. +func (o *ClientOptions) SetPingTimeout(k time.Duration) *ClientOptions { + o.PingTimeout = k + return o +} + +// SetProtocolVersion sets the MQTT version to be used to connect to the +// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1 +func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions { + if (pv >= 3 && pv <= 4) || (pv > 0x80) { + o.ProtocolVersion = pv + o.protocolVersionExplicit = true + } + return o +} + +// UnsetWill will cause any set will message to be disregarded. +func (o *ClientOptions) UnsetWill() *ClientOptions { + o.WillEnabled = false + return o +} + +// SetWill accepts a string will message to be set. When the client connects, +// it will give this will message to the broker, which will then publish the +// provided payload (the will) to any clients that are subscribed to the provided +// topic. +func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions { + o.SetBinaryWill(topic, []byte(payload), qos, retained) + return o +} + +// SetBinaryWill accepts a []byte will message to be set. When the client connects, +// it will give this will message to the broker, which will then publish the +// provided payload (the will) to any clients that are subscribed to the provided +// topic. 
+func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions { + o.WillEnabled = true + o.WillTopic = topic + o.WillPayload = payload + o.WillQos = qos + o.WillRetained = retained + return o +} + +// SetDefaultPublishHandler sets the MessageHandler that will be called when a message +// is received that does not match any known subscriptions. +func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions { + o.DefaultPublishHandler = defaultHandler + return o +} + +// SetOnConnectHandler sets the function to be called when the client is connected. Both +// at initial connection time and upon automatic reconnect. +func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions { + o.OnConnect = onConn + return o +} + +// SetConnectionLostHandler will set the OnConnectionLost callback to be executed +// in the case where the client unexpectedly loses connection with the MQTT broker. +func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions { + o.OnConnectionLost = onLost + return o +} + +// SetWriteTimeout puts a limit on how long a mqtt publish should block until it unblocks with a +// timeout error. A duration of 0 never times out. Default 30 seconds +func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions { + o.WriteTimeout = t + return o +} + +// SetConnectTimeout limits how long the client will wait when trying to open a connection +// to an MQTT server before timeing out and erroring the attempt. A duration of 0 never times out. +// Default 30 seconds. Currently only operational on TCP/TLS connections. +func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions { + o.ConnectTimeout = t + return o +} + +// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts +// when connection is lost +func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions { + o.MaxReconnectInterval = t + return o +} + +// SetAutoReconnect sets whether the automatic reconnection logic should be used +// when the connection is lost, even if disabled the ConnectionLostHandler is still +// called +func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions { + o.AutoReconnect = a + return o +} + +// SetMessageChannelDepth sets the size of the internal queue that holds messages while the +// client is temporairily offline, allowing the application to publish when the client is +// reconnecting. This setting is only valid if AutoReconnect is set to true, it is otherwise +// ignored. +func (o *ClientOptions) SetMessageChannelDepth(s uint) *ClientOptions { + o.MessageChannelDepth = s + return o +} + +// SetHTTPHeaders sets the additional HTTP headers that will be sent in the WebSocket +// opening handshake. +func (o *ClientOptions) SetHTTPHeaders(h http.Header) *ClientOptions { + o.HTTPHeaders = h + return o +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go new file mode 100644 index 00000000..60144b93 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/options_reader.go @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "net/http" + "net/url" + "time" +) + +// ClientOptionsReader provides an interface for reading ClientOptions after the client has been initialized. +type ClientOptionsReader struct { + options *ClientOptions +} + +//Servers returns a slice of the servers defined in the clientoptions +func (r *ClientOptionsReader) Servers() []*url.URL { + s := make([]*url.URL, len(r.options.Servers)) + + for i, u := range r.options.Servers { + nu := *u + s[i] = &nu + } + + return s +} + +//ResumeSubs returns true if resuming stored (un)sub is enabled +func (r *ClientOptionsReader) ResumeSubs() bool { + s := r.options.ResumeSubs + return s +} + +//ClientID returns the set client id +func (r *ClientOptionsReader) ClientID() string { + s := r.options.ClientID + return s +} + +//Username returns the set username +func (r *ClientOptionsReader) Username() string { + s := r.options.Username + return s +} + +//Password returns the set password +func (r *ClientOptionsReader) Password() string { + s := r.options.Password + return s +} + +//CleanSession returns whether Cleansession is set +func (r *ClientOptionsReader) CleanSession() bool { + s := r.options.CleanSession + return s +} + +func (r *ClientOptionsReader) Order() bool { + s := r.options.Order + return s +} + +func (r *ClientOptionsReader) WillEnabled() bool { + s := r.options.WillEnabled + return s +} + +func (r *ClientOptionsReader) WillTopic() string { + s := r.options.WillTopic + return s +} + +func (r *ClientOptionsReader) WillPayload() []byte { + s := r.options.WillPayload + return s +} + +func (r *ClientOptionsReader) WillQos() byte { + s := r.options.WillQos + return s +} + +func (r *ClientOptionsReader) WillRetained() bool { + s := r.options.WillRetained + return s +} + +func (r *ClientOptionsReader) ProtocolVersion() uint { + s := r.options.ProtocolVersion + return s +} + +func (r *ClientOptionsReader) TLSConfig() *tls.Config { + s := r.options.TLSConfig + return s +} + +func (r *ClientOptionsReader) KeepAlive() time.Duration { + s := time.Duration(r.options.KeepAlive * int64(time.Second)) + return s +} + +func (r *ClientOptionsReader) PingTimeout() time.Duration { + s := r.options.PingTimeout + return s +} + +func (r *ClientOptionsReader) ConnectTimeout() time.Duration { + s := r.options.ConnectTimeout + return s +} + +func (r *ClientOptionsReader) MaxReconnectInterval() time.Duration { + s := r.options.MaxReconnectInterval + return s +} + +func (r *ClientOptionsReader) AutoReconnect() bool { + s := r.options.AutoReconnect + return s +} + +func (r *ClientOptionsReader) WriteTimeout() time.Duration { + s := r.options.WriteTimeout + return s +} + +func (r *ClientOptionsReader) MessageChannelDepth() uint { + s := r.options.MessageChannelDepth + return s +} + +func (r *ClientOptionsReader) HTTPHeaders() http.Header { + h := r.options.HTTPHeaders + return h +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go new file mode 100644 index 00000000..25cf30f6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connack.go @@ -0,0 +1,55 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + 
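For orientation, here is a minimal, hedged sketch of how the ClientOptions builders above are normally chained. It is not part of the vendored files; the broker URL, client ID, and topic are placeholders, and NewClient, Connect, Publish, and Disconnect come from client.go and token.go elsewhere in this patch rather than from the hunks shown here.

package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// Every setter returns *ClientOptions, so options are built fluently.
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883"). // "tcp", "ssl"/"tls"/"tcps", "ws", "wss", and "unix" are handled in net.go
		SetClientID("example-client").
		SetCleanSession(true).
		SetKeepAlive(30 * time.Second). // stored internally as seconds
		SetConnectTimeout(5 * time.Second).
		SetAutoReconnect(true)

	c := mqtt.NewClient(opts) // NewClient is defined in client.go, added elsewhere in this patch
	if t := c.Connect(); t.Wait() && t.Error() != nil {
		fmt.Println("connect failed:", t.Error())
		return
	}
	defer c.Disconnect(250)

	if t := c.Publish("devices/announce", 1, false, "hello"); t.Wait() && t.Error() != nil {
		fmt.Println("publish failed:", t.Error())
	}
}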
+//ConnackPacket is an internal representation of the fields of the +//Connack MQTT packet +type ConnackPacket struct { + FixedHeader + SessionPresent bool + ReturnCode byte +} + +func (ca *ConnackPacket) String() string { + str := fmt.Sprintf("%s", ca.FixedHeader) + str += " " + str += fmt.Sprintf("sessionpresent: %t returncode: %d", ca.SessionPresent, ca.ReturnCode) + return str +} + +func (ca *ConnackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.WriteByte(boolToByte(ca.SessionPresent)) + body.WriteByte(ca.ReturnCode) + ca.FixedHeader.RemainingLength = 2 + packet := ca.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ca *ConnackPacket) Unpack(b io.Reader) error { + flags, err := decodeByte(b) + if err != nil { + return err + } + ca.SessionPresent = 1&flags > 0 + ca.ReturnCode, err = decodeByte(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (ca *ConnackPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go new file mode 100644 index 00000000..cb03ebc0 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/connect.go @@ -0,0 +1,154 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//ConnectPacket is an internal representation of the fields of the +//Connect MQTT packet +type ConnectPacket struct { + FixedHeader + ProtocolName string + ProtocolVersion byte + CleanSession bool + WillFlag bool + WillQos byte + WillRetain bool + UsernameFlag bool + PasswordFlag bool + ReservedBit byte + Keepalive uint16 + + ClientIdentifier string + WillTopic string + WillMessage []byte + Username string + Password []byte +} + +func (c *ConnectPacket) String() string { + str := fmt.Sprintf("%s", c.FixedHeader) + str += " " + str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalive: %d clientId: %s willtopic: %s willmessage: %s Username: %s Password: %s", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.Keepalive, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password) + return str +} + +func (c *ConnectPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(c.ProtocolName)) + body.WriteByte(c.ProtocolVersion) + body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7) + body.Write(encodeUint16(c.Keepalive)) + body.Write(encodeString(c.ClientIdentifier)) + if c.WillFlag { + body.Write(encodeString(c.WillTopic)) + body.Write(encodeBytes(c.WillMessage)) + } + if c.UsernameFlag { + body.Write(encodeString(c.Username)) + } + if c.PasswordFlag { + body.Write(encodeBytes(c.Password)) + } + c.FixedHeader.RemainingLength = body.Len() + packet := c.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (c *ConnectPacket) Unpack(b io.Reader) error { + var err error + c.ProtocolName, err = 
decodeString(b) + if err != nil { + return err + } + c.ProtocolVersion, err = decodeByte(b) + if err != nil { + return err + } + options, err := decodeByte(b) + if err != nil { + return err + } + c.ReservedBit = 1 & options + c.CleanSession = 1&(options>>1) > 0 + c.WillFlag = 1&(options>>2) > 0 + c.WillQos = 3 & (options >> 3) + c.WillRetain = 1&(options>>5) > 0 + c.PasswordFlag = 1&(options>>6) > 0 + c.UsernameFlag = 1&(options>>7) > 0 + c.Keepalive, err = decodeUint16(b) + if err != nil { + return err + } + c.ClientIdentifier, err = decodeString(b) + if err != nil { + return err + } + if c.WillFlag { + c.WillTopic, err = decodeString(b) + if err != nil { + return err + } + c.WillMessage, err = decodeBytes(b) + if err != nil { + return err + } + } + if c.UsernameFlag { + c.Username, err = decodeString(b) + if err != nil { + return err + } + } + if c.PasswordFlag { + c.Password, err = decodeBytes(b) + if err != nil { + return err + } + } + + return nil +} + +//Validate performs validation of the fields of a Connect packet +func (c *ConnectPacket) Validate() byte { + if c.PasswordFlag && !c.UsernameFlag { + return ErrRefusedBadUsernameOrPassword + } + if c.ReservedBit != 0 { + //Bad reserved bit + return ErrProtocolViolation + } + if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) { + //Mismatched or unsupported protocol version + return ErrRefusedBadProtocolVersion + } + if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" { + //Bad protocol name + return ErrProtocolViolation + } + if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 { + //Bad size field + return ErrProtocolViolation + } + if len(c.ClientIdentifier) == 0 && !c.CleanSession { + //Bad client identifier + return ErrRefusedIDRejected + } + return Accepted +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (c *ConnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go new file mode 100644 index 00000000..e5c18692 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/disconnect.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + +//DisconnectPacket is an internal representation of the fields of the +//Disconnect MQTT packet +type DisconnectPacket struct { + FixedHeader +} + +func (d *DisconnectPacket) String() string { + str := fmt.Sprintf("%s", d.FixedHeader) + return str +} + +func (d *DisconnectPacket) Write(w io.Writer) error { + packet := d.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (d *DisconnectPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (d *DisconnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go new file mode 100644 index 00000000..42eeb46d --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/packets.go @@ -0,0 +1,346 @@ +package packets + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" +) + +//ControlPacket defines the interface for structs intended 
to hold +//decoded MQTT packets, either from being read or before being +//written +type ControlPacket interface { + Write(io.Writer) error + Unpack(io.Reader) error + String() string + Details() Details +} + +//PacketNames maps the constants for each of the MQTT packet types +//to a string representation of their name. +var PacketNames = map[uint8]string{ + 1: "CONNECT", + 2: "CONNACK", + 3: "PUBLISH", + 4: "PUBACK", + 5: "PUBREC", + 6: "PUBREL", + 7: "PUBCOMP", + 8: "SUBSCRIBE", + 9: "SUBACK", + 10: "UNSUBSCRIBE", + 11: "UNSUBACK", + 12: "PINGREQ", + 13: "PINGRESP", + 14: "DISCONNECT", +} + +//Below are the constants assigned to each of the MQTT packet types +const ( + Connect = 1 + Connack = 2 + Publish = 3 + Puback = 4 + Pubrec = 5 + Pubrel = 6 + Pubcomp = 7 + Subscribe = 8 + Suback = 9 + Unsubscribe = 10 + Unsuback = 11 + Pingreq = 12 + Pingresp = 13 + Disconnect = 14 +) + +//Below are the const definitions for error codes returned by +//Connect() +const ( + Accepted = 0x00 + ErrRefusedBadProtocolVersion = 0x01 + ErrRefusedIDRejected = 0x02 + ErrRefusedServerUnavailable = 0x03 + ErrRefusedBadUsernameOrPassword = 0x04 + ErrRefusedNotAuthorised = 0x05 + ErrNetworkError = 0xFE + ErrProtocolViolation = 0xFF +) + +//ConnackReturnCodes is a map of the error codes constants for Connect() +//to a string representation of the error +var ConnackReturnCodes = map[uint8]string{ + 0: "Connection Accepted", + 1: "Connection Refused: Bad Protocol Version", + 2: "Connection Refused: Client Identifier Rejected", + 3: "Connection Refused: Server Unavailable", + 4: "Connection Refused: Username or Password in unknown format", + 5: "Connection Refused: Not Authorised", + 254: "Connection Error", + 255: "Connection Refused: Protocol Violation", +} + +//ConnErrors is a map of the errors codes constants for Connect() +//to a Go error +var ConnErrors = map[byte]error{ + Accepted: nil, + ErrRefusedBadProtocolVersion: errors.New("Unnacceptable protocol version"), + ErrRefusedIDRejected: errors.New("Identifier rejected"), + ErrRefusedServerUnavailable: errors.New("Server Unavailable"), + ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"), + ErrRefusedNotAuthorised: errors.New("Not Authorized"), + ErrNetworkError: errors.New("Network Error"), + ErrProtocolViolation: errors.New("Protocol Violation"), +} + +//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts +//to read an MQTT packet from the stream. It returns a ControlPacket +//representing the decoded MQTT packet and an error. One of these returns will +//always be nil, a nil ControlPacket indicating an error occurred. +func ReadPacket(r io.Reader) (ControlPacket, error) { + var fh FixedHeader + b := make([]byte, 1) + + _, err := io.ReadFull(r, b) + if err != nil { + return nil, err + } + + err = fh.unpack(b[0], r) + if err != nil { + return nil, err + } + + cp, err := NewControlPacketWithHeader(fh) + if err != nil { + return nil, err + } + + packetBytes := make([]byte, fh.RemainingLength) + n, err := io.ReadFull(r, packetBytes) + if err != nil { + return nil, err + } + if n != fh.RemainingLength { + return nil, errors.New("Failed to read expected data") + } + + err = cp.Unpack(bytes.NewBuffer(packetBytes)) + return cp, err +} + +//NewControlPacket is used to create a new ControlPacket of the type specified +//by packetType, this is usually done by reference to the packet type constants +//defined in packets.go. The newly created ControlPacket is empty and a pointer +//is returned. 
+func NewControlPacket(packetType byte) ControlPacket { + switch packetType { + case Connect: + return &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}} + case Connack: + return &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}} + case Disconnect: + return &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}} + case Publish: + return &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}} + case Puback: + return &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}} + case Pubrec: + return &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}} + case Pubrel: + return &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}} + case Pubcomp: + return &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}} + case Subscribe: + return &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}} + case Suback: + return &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}} + case Unsubscribe: + return &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}} + case Unsuback: + return &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}} + case Pingreq: + return &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}} + case Pingresp: + return &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}} + } + return nil +} + +//NewControlPacketWithHeader is used to create a new ControlPacket of the type +//specified within the FixedHeader that is passed to the function. +//The newly created ControlPacket is empty and a pointer is returned. +func NewControlPacketWithHeader(fh FixedHeader) (ControlPacket, error) { + switch fh.MessageType { + case Connect: + return &ConnectPacket{FixedHeader: fh}, nil + case Connack: + return &ConnackPacket{FixedHeader: fh}, nil + case Disconnect: + return &DisconnectPacket{FixedHeader: fh}, nil + case Publish: + return &PublishPacket{FixedHeader: fh}, nil + case Puback: + return &PubackPacket{FixedHeader: fh}, nil + case Pubrec: + return &PubrecPacket{FixedHeader: fh}, nil + case Pubrel: + return &PubrelPacket{FixedHeader: fh}, nil + case Pubcomp: + return &PubcompPacket{FixedHeader: fh}, nil + case Subscribe: + return &SubscribePacket{FixedHeader: fh}, nil + case Suback: + return &SubackPacket{FixedHeader: fh}, nil + case Unsubscribe: + return &UnsubscribePacket{FixedHeader: fh}, nil + case Unsuback: + return &UnsubackPacket{FixedHeader: fh}, nil + case Pingreq: + return &PingreqPacket{FixedHeader: fh}, nil + case Pingresp: + return &PingrespPacket{FixedHeader: fh}, nil + } + return nil, fmt.Errorf("unsupported packet type 0x%x", fh.MessageType) +} + +//Details struct returned by the Details() function called on +//ControlPackets to present details of the Qos and MessageID +//of the ControlPacket +type Details struct { + Qos byte + MessageID uint16 +} + +//FixedHeader is a struct to hold the decoded information from +//the fixed header of an MQTT ControlPacket +type FixedHeader struct { + MessageType byte + Dup bool + Qos byte + Retain bool + RemainingLength int +} + +func (fh FixedHeader) String() string { + return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength) +} + +func boolToByte(b bool) byte { + switch b { + case true: + return 1 + default: + return 0 + } +} + +func (fh *FixedHeader) pack() bytes.Buffer { + var header bytes.Buffer + header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain)) + 
header.Write(encodeLength(fh.RemainingLength)) + return header +} + +func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) error { + fh.MessageType = typeAndFlags >> 4 + fh.Dup = (typeAndFlags>>3)&0x01 > 0 + fh.Qos = (typeAndFlags >> 1) & 0x03 + fh.Retain = typeAndFlags&0x01 > 0 + + var err error + fh.RemainingLength, err = decodeLength(r) + return err +} + +func decodeByte(b io.Reader) (byte, error) { + num := make([]byte, 1) + _, err := b.Read(num) + if err != nil { + return 0, err + } + + return num[0], nil +} + +func decodeUint16(b io.Reader) (uint16, error) { + num := make([]byte, 2) + _, err := b.Read(num) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(num), nil +} + +func encodeUint16(num uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, num) + return bytes +} + +func encodeString(field string) []byte { + return encodeBytes([]byte(field)) +} + +func decodeString(b io.Reader) (string, error) { + buf, err := decodeBytes(b) + return string(buf), err +} + +func decodeBytes(b io.Reader) ([]byte, error) { + fieldLength, err := decodeUint16(b) + if err != nil { + return nil, err + } + + field := make([]byte, fieldLength) + _, err = b.Read(field) + if err != nil { + return nil, err + } + + return field, nil +} + +func encodeBytes(field []byte) []byte { + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, field...) +} + +func encodeLength(length int) []byte { + var encLength []byte + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + encLength = append(encLength, digit) + if length == 0 { + break + } + } + return encLength +} + +func decodeLength(r io.Reader) (int, error) { + var rLength uint32 + var multiplier uint32 + b := make([]byte, 1) + for multiplier < 27 { //fix: Infinite '(digit & 128) == 1' will cause the dead loop + _, err := io.ReadFull(r, b) + if err != nil { + return 0, err + } + + digit := b[0] + rLength |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(rLength), nil +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go new file mode 100644 index 00000000..5c3e88f9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingreq.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + +//PingreqPacket is an internal representation of the fields of the +//Pingreq MQTT packet +type PingreqPacket struct { + FixedHeader +} + +func (pr *PingreqPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingreqPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingreqPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingreqPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go new file mode 100644 index 00000000..39ebc001 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pingresp.go @@ -0,0 +1,36 @@ +package packets + +import ( + "fmt" + "io" +) + 
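The fixed-header helpers above (pack, unpack, encodeLength, decodeLength) are exercised indirectly whenever a packet is written and read back. Here is a minimal round-trip sketch that uses only the exported API shown in packets.go and pingreq.go; it is illustrative and not part of the vendored code:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/eclipse/paho.mqtt.golang/packets"
)

func main() {
	// Build a PINGREQ and serialise it: type nibble plus encoded remaining length.
	ping := packets.NewControlPacket(packets.Pingreq)

	var buf bytes.Buffer
	if err := ping.Write(&buf); err != nil {
		panic(err)
	}

	// Decode the same bytes again via ReadPacket.
	cp, err := packets.ReadPacket(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(cp.String()) // PINGREQ: dup: false qos: 0 retain: false rLength: 0
}
```

On the wire this is just the two bytes 0xC0 0x00: the PINGREQ type shifted into the high nibble, followed by a zero remaining length produced by encodeLength, which is why the decoded header prints rLength: 0.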
+//PingrespPacket is an internal representation of the fields of the +//Pingresp MQTT packet +type PingrespPacket struct { + FixedHeader +} + +func (pr *PingrespPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingrespPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingrespPacket) Unpack(b io.Reader) error { + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingrespPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go new file mode 100644 index 00000000..7c0cd7ef --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/puback.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubackPacket is an internal representation of the fields of the +//Puback MQTT packet +type PubackPacket struct { + FixedHeader + MessageID uint16 +} + +func (pa *PubackPacket) String() string { + str := fmt.Sprintf("%s", pa.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pa.MessageID) + return str +} + +func (pa *PubackPacket) Write(w io.Writer) error { + var err error + pa.FixedHeader.RemainingLength = 2 + packet := pa.FixedHeader.pack() + packet.Write(encodeUint16(pa.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pa *PubackPacket) Unpack(b io.Reader) error { + var err error + pa.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pa *PubackPacket) Details() Details { + return Details{Qos: pa.Qos, MessageID: pa.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go new file mode 100644 index 00000000..4f6f6e21 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubcomp.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubcompPacket is an internal representation of the fields of the +//Pubcomp MQTT packet +type PubcompPacket struct { + FixedHeader + MessageID uint16 +} + +func (pc *PubcompPacket) String() string { + str := fmt.Sprintf("%s", pc.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pc.MessageID) + return str +} + +func (pc *PubcompPacket) Write(w io.Writer) error { + var err error + pc.FixedHeader.RemainingLength = 2 + packet := pc.FixedHeader.pack() + packet.Write(encodeUint16(pc.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pc *PubcompPacket) Unpack(b io.Reader) error { + var err error + pc.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pc *PubcompPacket) Details() Details { + return Details{Qos: pc.Qos, MessageID: pc.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go new file mode 100644 index 00000000..adc9adb9 --- /dev/null +++ 
b/vendor/github.com/eclipse/paho.mqtt.golang/packets/publish.go @@ -0,0 +1,88 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//PublishPacket is an internal representation of the fields of the +//Publish MQTT packet +type PublishPacket struct { + FixedHeader + TopicName string + MessageID uint16 + Payload []byte +} + +func (p *PublishPacket) String() string { + str := fmt.Sprintf("%s", p.FixedHeader) + str += " " + str += fmt.Sprintf("topicName: %s MessageID: %d", p.TopicName, p.MessageID) + str += " " + str += fmt.Sprintf("payload: %s", string(p.Payload)) + return str +} + +func (p *PublishPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(p.TopicName)) + if p.Qos > 0 { + body.Write(encodeUint16(p.MessageID)) + } + p.FixedHeader.RemainingLength = body.Len() + len(p.Payload) + packet := p.FixedHeader.pack() + packet.Write(body.Bytes()) + packet.Write(p.Payload) + _, err = w.Write(packet.Bytes()) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (p *PublishPacket) Unpack(b io.Reader) error { + var payloadLength = p.FixedHeader.RemainingLength + var err error + p.TopicName, err = decodeString(b) + if err != nil { + return err + } + + if p.Qos > 0 { + p.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + payloadLength -= len(p.TopicName) + 4 + } else { + payloadLength -= len(p.TopicName) + 2 + } + if payloadLength < 0 { + return fmt.Errorf("Error unpacking publish, payload length < 0") + } + p.Payload = make([]byte, payloadLength) + _, err = b.Read(p.Payload) + + return err +} + +//Copy creates a new PublishPacket with the same topic and payload +//but an empty fixed header, useful for when you want to deliver +//a message with different properties such as Qos but the same +//content +func (p *PublishPacket) Copy() *PublishPacket { + newP := NewControlPacket(Publish).(*PublishPacket) + newP.TopicName = p.TopicName + newP.Payload = p.Payload + + return newP +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (p *PublishPacket) Details() Details { + return Details{Qos: p.Qos, MessageID: p.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go new file mode 100644 index 00000000..483372b0 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrec.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubrecPacket is an internal representation of the fields of the +//Pubrec MQTT packet +type PubrecPacket struct { + FixedHeader + MessageID uint16 +} + +func (pr *PubrecPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrecPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrecPacket) Unpack(b io.Reader) error { + var err error + pr.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrecPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} diff --git 
a/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go new file mode 100644 index 00000000..8590fd97 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/pubrel.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//PubrelPacket is an internal representation of the fields of the +//Pubrel MQTT packet +type PubrelPacket struct { + FixedHeader + MessageID uint16 +} + +func (pr *PubrelPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrelPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrelPacket) Unpack(b io.Reader) error { + var err error + pr.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrelPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go new file mode 100644 index 00000000..fc057247 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/suback.go @@ -0,0 +1,60 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//SubackPacket is an internal representation of the fields of the +//Suback MQTT packet +type SubackPacket struct { + FixedHeader + MessageID uint16 + ReturnCodes []byte +} + +func (sa *SubackPacket) String() string { + str := fmt.Sprintf("%s", sa.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", sa.MessageID) + return str +} + +func (sa *SubackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(sa.MessageID)) + body.Write(sa.ReturnCodes) + sa.FixedHeader.RemainingLength = body.Len() + packet := sa.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (sa *SubackPacket) Unpack(b io.Reader) error { + var qosBuffer bytes.Buffer + var err error + sa.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + + _, err = qosBuffer.ReadFrom(b) + if err != nil { + return err + } + sa.ReturnCodes = qosBuffer.Bytes() + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (sa *SubackPacket) Details() Details { + return Details{Qos: 0, MessageID: sa.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go new file mode 100644 index 00000000..0787ce07 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/subscribe.go @@ -0,0 +1,72 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//SubscribePacket is an internal representation of the fields of the +//Subscribe MQTT packet +type SubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string + Qoss []byte +} + +func (s *SubscribePacket) String() string { + str := fmt.Sprintf("%s", s.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d 
topics: %s", s.MessageID, s.Topics) + return str +} + +func (s *SubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeUint16(s.MessageID)) + for i, topic := range s.Topics { + body.Write(encodeString(topic)) + body.WriteByte(s.Qoss[i]) + } + s.FixedHeader.RemainingLength = body.Len() + packet := s.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (s *SubscribePacket) Unpack(b io.Reader) error { + var err error + s.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + payloadLength := s.FixedHeader.RemainingLength - 2 + for payloadLength > 0 { + topic, err := decodeString(b) + if err != nil { + return err + } + s.Topics = append(s.Topics, topic) + qos, err := decodeByte(b) + if err != nil { + return err + } + s.Qoss = append(s.Qoss, qos) + payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos + } + + return nil +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (s *SubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: s.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go new file mode 100644 index 00000000..4b40c273 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsuback.go @@ -0,0 +1,45 @@ +package packets + +import ( + "fmt" + "io" +) + +//UnsubackPacket is an internal representation of the fields of the +//Unsuback MQTT packet +type UnsubackPacket struct { + FixedHeader + MessageID uint16 +} + +func (ua *UnsubackPacket) String() string { + str := fmt.Sprintf("%s", ua.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", ua.MessageID) + return str +} + +func (ua *UnsubackPacket) Write(w io.Writer) error { + var err error + ua.FixedHeader.RemainingLength = 2 + packet := ua.FixedHeader.pack() + packet.Write(encodeUint16(ua.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ua *UnsubackPacket) Unpack(b io.Reader) error { + var err error + ua.MessageID, err = decodeUint16(b) + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (ua *UnsubackPacket) Details() Details { + return Details{Qos: 0, MessageID: ua.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go new file mode 100644 index 00000000..2012c310 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/packets/unsubscribe.go @@ -0,0 +1,59 @@ +package packets + +import ( + "bytes" + "fmt" + "io" +) + +//UnsubscribePacket is an internal representation of the fields of the +//Unsubscribe MQTT packet +type UnsubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string +} + +func (u *UnsubscribePacket) String() string { + str := fmt.Sprintf("%s", u.FixedHeader) + str += " " + str += fmt.Sprintf("MessageID: %d", u.MessageID) + return str +} + +func (u *UnsubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(u.MessageID)) + for _, topic := range u.Topics { + body.Write(encodeString(topic)) + } + u.FixedHeader.RemainingLength = body.Len() + 
packet := u.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (u *UnsubscribePacket) Unpack(b io.Reader) error { + var err error + u.MessageID, err = decodeUint16(b) + if err != nil { + return err + } + + for topic, err := decodeString(b); err == nil && topic != ""; topic, err = decodeString(b) { + u.Topics = append(u.Topics, topic) + } + + return err +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (u *UnsubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: u.MessageID} +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/ping.go b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go new file mode 100644 index 00000000..dcbcb1dd --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/ping.go @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "errors" + "sync/atomic" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +func keepalive(c *client) { + defer c.workers.Done() + DEBUG.Println(PNG, "keepalive starting") + var checkInterval int64 + var pingSent time.Time + + if c.options.KeepAlive > 10 { + checkInterval = 5 + } else { + checkInterval = c.options.KeepAlive / 2 + } + + intervalTicker := time.NewTicker(time.Duration(checkInterval * int64(time.Second))) + defer intervalTicker.Stop() + + for { + select { + case <-c.stop: + DEBUG.Println(PNG, "keepalive stopped") + return + case <-intervalTicker.C: + lastSent := c.lastSent.Load().(time.Time) + lastReceived := c.lastReceived.Load().(time.Time) + + DEBUG.Println(PNG, "ping check", time.Since(lastSent).Seconds()) + if time.Since(lastSent) >= time.Duration(c.options.KeepAlive*int64(time.Second)) || time.Since(lastReceived) >= time.Duration(c.options.KeepAlive*int64(time.Second)) { + if atomic.LoadInt32(&c.pingOutstanding) == 0 { + DEBUG.Println(PNG, "keepalive sending ping") + ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) + //We don't want to wait behind large messages being sent, the Write call + //will block until it it able to send the packet. + atomic.StoreInt32(&c.pingOutstanding, 1) + ping.Write(c.conn) + c.lastSent.Store(time.Now()) + pingSent = time.Now() + } + } + if atomic.LoadInt32(&c.pingOutstanding) > 0 && time.Now().Sub(pingSent) >= c.options.PingTimeout { + CRITICAL.Println(PNG, "pingresp not received, disconnecting") + c.errors <- errors.New("pingresp not received, disconnecting") + return + } + } + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/router.go b/vendor/github.com/eclipse/paho.mqtt.golang/router.go new file mode 100644 index 00000000..7b4e8f80 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/router.go @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "container/list" + "strings" + "sync" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// route is a type which associates MQTT Topic strings with a +// callback to be executed upon the arrival of a message associated +// with a subscription to that topic. +type route struct { + topic string + callback MessageHandler +} + +// match takes a slice of strings which represent the route being tested having been split on '/' +// separators, and a slice of strings representing the topic string in the published message, similarly +// split. +// The function determines if the topic string matches the route according to the MQTT topic rules +// and returns a boolean of the outcome +func match(route []string, topic []string) bool { + if len(route) == 0 { + if len(topic) == 0 { + return true + } + return false + } + + if len(topic) == 0 { + if route[0] == "#" { + return true + } + return false + } + + if route[0] == "#" { + return true + } + + if (route[0] == "+") || (route[0] == topic[0]) { + return match(route[1:], topic[1:]) + } + return false +} + +func routeIncludesTopic(route, topic string) bool { + return match(routeSplit(route), strings.Split(topic, "/")) +} + +// removes $share and sharename when splitting the route to allow +// shared subscription routes to correctly match the topic +func routeSplit(route string) []string { + var result []string + if strings.HasPrefix(route, "$share") { + result = strings.Split(route, "/")[2:] + } else { + result = strings.Split(route, "/") + } + return result +} + +// match takes the topic string of the published message and does a basic compare to the +// string of the current Route, if they match it returns true +func (r *route) match(topic string) bool { + return r.topic == topic || routeIncludesTopic(r.topic, topic) +} + +type router struct { + sync.RWMutex + routes *list.List + defaultHandler MessageHandler + messages chan *packets.PublishPacket + stop chan bool +} + +// newRouter returns a new instance of a Router and channel which can be used to tell the Router +// to stop +func newRouter() (*router, chan bool) { + router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)} + stop := router.stop + return router, stop +} + +// addRoute takes a topic string and MessageHandler callback. It looks in the current list of +// routes to see if there is already a matching Route. If there is it replaces the current +// callback with the new one. If not it add a new entry to the list of Routes. +func (r *router) addRoute(topic string, callback MessageHandler) { + r.Lock() + defer r.Unlock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(topic) { + r := e.Value.(*route) + r.callback = callback + return + } + } + r.routes.PushBack(&route{topic: topic, callback: callback}) +} + +// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If +// found it removes the Route from the list. 
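The recursive match above implements the usual MQTT wildcard semantics, and routeSplit strips a leading $share/<group>/ prefix so shared-subscription routes still match their topics. A test-style sketch of the expected outcomes follows; since match and routeIncludesTopic are unexported it would have to live inside the mqtt package, and it is illustrative only, not part of the vendored file:

```go
package mqtt

import "testing"

func TestRouteIncludesTopicSketch(t *testing.T) {
	cases := []struct {
		route, topic string
		want         bool
	}{
		{"foo/#", "foo/bar/baz", true},          // '#' matches any number of trailing levels
		{"foo/+/baz", "foo/bar/baz", true},      // '+' matches exactly one level
		{"foo/+", "foo/bar/baz", false},         // '+' does not span multiple levels
		{"$share/group/foo/+", "foo/bar", true}, // $share/<group>/ is stripped by routeSplit
	}
	for _, c := range cases {
		if got := routeIncludesTopic(c.route, c.topic); got != c.want {
			t.Errorf("routeIncludesTopic(%q, %q) = %v, want %v", c.route, c.topic, got, c.want)
		}
	}
}
```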
+func (r *router) deleteRoute(topic string) { + r.Lock() + defer r.Unlock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(topic) { + r.routes.Remove(e) + return + } + } +} + +// setDefaultHandler assigns a default callback that will be called if no matching Route +// is found for an incoming Publish. +func (r *router) setDefaultHandler(handler MessageHandler) { + r.Lock() + defer r.Unlock() + r.defaultHandler = handler +} + +// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that +// takes messages off the channel, matches them against the internal route list and calls the +// associated callback (or the defaultHandler, if one exists and no other route matched). If +// anything is sent down the stop channel the function will end. +func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *client) { + go func() { + for { + select { + case message := <-messages: + sent := false + r.RLock() + m := messageFromPublish(message, client.ackFunc(message)) + handlers := []MessageHandler{} + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(message.TopicName) { + if order { + handlers = append(handlers, e.Value.(*route).callback) + } else { + hd := e.Value.(*route).callback + go func() { + hd(client, m) + m.Ack() + }() + } + sent = true + } + } + if !sent && r.defaultHandler != nil { + if order { + handlers = append(handlers, r.defaultHandler) + } else { + go func() { + r.defaultHandler(client, m) + m.Ack() + }() + } + } + r.RUnlock() + for _, handler := range handlers { + func() { + handler(client, m) + m.Ack() + }() + } + case <-r.stop: + return + } + } + }() +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/store.go b/vendor/github.com/eclipse/paho.mqtt.golang/store.go new file mode 100644 index 00000000..24a76b7d --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/store.go @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "strconv" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +const ( + inboundPrefix = "i." + outboundPrefix = "o." +) + +// Store is an interface which can be used to provide implementations +// for message persistence. +// Because we may have to store distinct messages with the same +// message ID, we need a unique key for each message. This is +// possible by prepending "i." or "o." 
to each message id +type Store interface { + Open() + Put(key string, message packets.ControlPacket) + Get(key string) packets.ControlPacket + All() []string + Del(key string) + Close() + Reset() +} + +// A key MUST have the form "X.[messageid]" +// where X is 'i' or 'o' +func mIDFromKey(key string) uint16 { + s := key[2:] + i, err := strconv.Atoi(s) + chkerr(err) + return uint16(i) +} + +// Return true if key prefix is outbound +func isKeyOutbound(key string) bool { + return key[:2] == outboundPrefix +} + +// Return true if key prefix is inbound +func isKeyInbound(key string) bool { + return key[:2] == inboundPrefix +} + +// Return a string of the form "i.[id]" +func inboundKeyFromMID(id uint16) string { + return fmt.Sprintf("%s%d", inboundPrefix, id) +} + +// Return a string of the form "o.[id]" +func outboundKeyFromMID(id uint16) string { + return fmt.Sprintf("%s%d", outboundPrefix, id) +} + +// govern which outgoing messages are persisted +func persistOutbound(s Store, m packets.ControlPacket) { + switch m.Details().Qos { + case 0: + switch m.(type) { + case *packets.PubackPacket, *packets.PubcompPacket: + // Sending puback. delete matching publish + // from ibound + s.Del(inboundKeyFromMID(m.Details().MessageID)) + } + case 1: + switch m.(type) { + case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket: + // Sending publish. store in obound + // until puback received + s.Put(outboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid message type") + } + case 2: + switch m.(type) { + case *packets.PublishPacket: + // Sending publish. store in obound + // until pubrel received + s.Put(outboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid message type") + } + } +} + +// govern which incoming messages are persisted +func persistInbound(s Store, m packets.ControlPacket) { + switch m.Details().Qos { + case 0: + switch m.(type) { + case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket: + // Received a puback. delete matching publish + // from obound + s.Del(outboundKeyFromMID(m.Details().MessageID)) + case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket: + default: + ERROR.Println(STR, "Asked to persist an invalid messages type") + } + case 1: + switch m.(type) { + case *packets.PublishPacket, *packets.PubrelPacket: + // Received a publish. store it in ibound + // until puback sent + s.Put(inboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid messages type") + } + case 2: + switch m.(type) { + case *packets.PublishPacket: + // Received a publish. store it in ibound + // until pubrel received + s.Put(inboundKeyFromMID(m.Details().MessageID), m) + default: + ERROR.Println(STR, "Asked to persist an invalid messages type") + } + } +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/token.go b/vendor/github.com/eclipse/paho.mqtt.golang/token.go new file mode 100644 index 00000000..08185533 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/token.go @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2014 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Allan Stockdill-Mander + */ + +package mqtt + +import ( + "sync" + "time" + + "github.com/eclipse/paho.mqtt.golang/packets" +) + +// PacketAndToken is a struct that contains both a ControlPacket and a +// Token. This struct is passed via channels between the client interface +// code and the underlying code responsible for sending and receiving +// MQTT messages. +type PacketAndToken struct { + p packets.ControlPacket + t tokenCompletor +} + +// Token defines the interface for the tokens used to indicate when +// actions have completed. +type Token interface { + Wait() bool + WaitTimeout(time.Duration) bool + Error() error +} + +type TokenErrorSetter interface { + setError(error) +} + +type tokenCompletor interface { + Token + TokenErrorSetter + flowComplete() +} + +type baseToken struct { + m sync.RWMutex + complete chan struct{} + err error +} + +// Wait will wait indefinitely for the Token to complete, ie the Publish +// to be sent and confirmed receipt from the broker +func (b *baseToken) Wait() bool { + <-b.complete + return true +} + +// WaitTimeout takes a time.Duration to wait for the flow associated with the +// Token to complete, returns true if it returned before the timeout or +// returns false if the timeout occurred. In the case of a timeout the Token +// does not have an error set in case the caller wishes to wait again +func (b *baseToken) WaitTimeout(d time.Duration) bool { + b.m.Lock() + defer b.m.Unlock() + + timer := time.NewTimer(d) + select { + case <-b.complete: + if !timer.Stop() { + <-timer.C + } + return true + case <-timer.C: + } + + return false +} + +func (b *baseToken) flowComplete() { + select { + case <-b.complete: + default: + close(b.complete) + } +} + +func (b *baseToken) Error() error { + b.m.RLock() + defer b.m.RUnlock() + return b.err +} + +func (b *baseToken) setError(e error) { + b.m.Lock() + b.err = e + b.flowComplete() + b.m.Unlock() +} + +func newToken(tType byte) tokenCompletor { + switch tType { + case packets.Connect: + return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Subscribe: + return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)} + case packets.Publish: + return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Unsubscribe: + return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Disconnect: + return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}} + } + return nil +} + +// ConnectToken is an extension of Token containing the extra fields +// required to provide information about calls to Connect() +type ConnectToken struct { + baseToken + returnCode byte + sessionPresent bool +} + +// ReturnCode returns the acknowlegement code in the connack sent +// in response to a Connect() +func (c *ConnectToken) ReturnCode() byte { + c.m.RLock() + defer c.m.RUnlock() + return c.returnCode +} + +// SessionPresent returns a bool representing the value of the +// session present field in the connack sent in response to a Connect() +func (c *ConnectToken) SessionPresent() bool { + c.m.RLock() + defer c.m.RUnlock() + return c.sessionPresent +} + +// PublishToken is an extension of Token containing the extra fields +// required 
to provide information about calls to Publish() +type PublishToken struct { + baseToken + messageID uint16 +} + +// MessageID returns the MQTT message ID that was assigned to the +// Publish packet when it was sent to the broker +func (p *PublishToken) MessageID() uint16 { + return p.messageID +} + +// SubscribeToken is an extension of Token containing the extra fields +// required to provide information about calls to Subscribe() +type SubscribeToken struct { + baseToken + subs []string + subResult map[string]byte +} + +// Result returns a map of topics that were subscribed to along with +// the matching return code from the broker. This is either the Qos +// value of the subscription or an error code. +func (s *SubscribeToken) Result() map[string]byte { + s.m.RLock() + defer s.m.RUnlock() + return s.subResult +} + +// UnsubscribeToken is an extension of Token containing the extra fields +// required to provide information about calls to Unsubscribe() +type UnsubscribeToken struct { + baseToken +} + +// DisconnectToken is an extension of Token containing the extra fields +// required to provide information about calls to Disconnect() +type DisconnectToken struct { + baseToken +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/topic.go b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go new file mode 100644 index 00000000..6fa3ad2a --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/topic.go @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2014 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "errors" + "strings" +) + +//ErrInvalidQos is the error returned when an packet is to be sent +//with an invalid Qos value +var ErrInvalidQos = errors.New("Invalid QoS") + +//ErrInvalidTopicEmptyString is the error returned when a topic string +//is passed in that is 0 length +var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string") + +//ErrInvalidTopicMultilevel is the error returned when a topic string +//is passed in that has the multi level wildcard in any position but +//the last +var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level") + +// Topic Names and Topic Filters +// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard +// to the validity of Topic strings. +// - A Topic must be between 1 and 65535 bytes. +// - A Topic is case sensitive. +// - A Topic may contain whitespace. +// - A Topic containing a leading forward slash is different than a Topic without. +// - A Topic may be "/" (two levels, both empty string). +// - A Topic must be UTF-8 encoded. +// - A Topic may contain any number of levels. +// - A Topic may contain an empty level (two forward slashes in a row). +// - A TopicName may not contain a wildcard. +// - A TopicFilter may only have a # (multi-level) wildcard as the last level. +// - A TopicFilter may contain any number of + (single-level) wildcards. +// - A TopicFilter with a # will match the absense of a level +// Example: a subscription to "foo/#" will match messages published to "foo". 
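A few concrete cases make these rules easier to check against the validation helpers defined just below. The sketch is test-style and would have to sit inside the mqtt package, since the helpers are unexported; it is illustrative, not part of the vendored file:

```go
package mqtt

import "testing"

func TestValidateTopicAndQosSketch(t *testing.T) {
	cases := []struct {
		topic string
		qos   byte
		want  error
	}{
		{"", 0, ErrInvalidTopicEmptyString},         // empty topic strings are rejected
		{"foo/#/bar", 0, ErrInvalidTopicMultilevel}, // '#' is only allowed as the last level
		{"foo/#", 1, nil},                           // a trailing '#' is valid
		{"foo/bar", 3, ErrInvalidQos},               // QoS must be 0, 1 or 2
	}
	for _, c := range cases {
		if got := validateTopicAndQos(c.topic, c.qos); got != c.want {
			t.Errorf("validateTopicAndQos(%q, %d) = %v, want %v", c.topic, c.qos, got, c.want)
		}
	}
}
```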
+ +func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) { + var topics []string + var qoss []byte + for topic, qos := range subs { + if err := validateTopicAndQos(topic, qos); err != nil { + return nil, nil, err + } + topics = append(topics, topic) + qoss = append(qoss, qos) + } + + return topics, qoss, nil +} + +func validateTopicAndQos(topic string, qos byte) error { + if len(topic) == 0 { + return ErrInvalidTopicEmptyString + } + + levels := strings.Split(topic, "/") + for i, level := range levels { + if level == "#" && i != len(levels)-1 { + return ErrInvalidTopicMultilevel + } + } + + if qos < 0 || qos > 2 { + return ErrInvalidQos + } + return nil +} diff --git a/vendor/github.com/eclipse/paho.mqtt.golang/trace.go b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go new file mode 100644 index 00000000..195c8173 --- /dev/null +++ b/vendor/github.com/eclipse/paho.mqtt.golang/trace.go @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +type ( + // Logger interface allows implementations to provide to this package any + // object that implements the methods defined in it. + Logger interface { + Println(v ...interface{}) + Printf(format string, v ...interface{}) + } + + // NOOPLogger implements the logger that does not perform any operation + // by default. This allows us to efficiently discard the unwanted messages. + NOOPLogger struct{} +) + +func (NOOPLogger) Println(v ...interface{}) {} +func (NOOPLogger) Printf(format string, v ...interface{}) {} + +// Internal levels of library output that are initialised to not print +// anything but can be overridden by programmer +var ( + ERROR Logger = NOOPLogger{} + CRITICAL Logger = NOOPLogger{} + WARN Logger = NOOPLogger{} + DEBUG Logger = NOOPLogger{} +) diff --git a/vendor/github.com/farshidtz/elog/.travis.yml b/vendor/github.com/farshidtz/elog/.travis.yml new file mode 100644 index 00000000..c74e4fa5 --- /dev/null +++ b/vendor/github.com/farshidtz/elog/.travis.yml @@ -0,0 +1,4 @@ +language: go + +go: + - 1.x \ No newline at end of file diff --git a/vendor/github.com/farshidtz/elog/LICENSE b/vendor/github.com/farshidtz/elog/LICENSE new file mode 100644 index 00000000..c9f93b00 --- /dev/null +++ b/vendor/github.com/farshidtz/elog/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Farshid Tavakolizadeh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/farshidtz/elog/README.md b/vendor/github.com/farshidtz/elog/README.md new file mode 100644 index 00000000..d432c098 --- /dev/null +++ b/vendor/github.com/farshidtz/elog/README.md @@ -0,0 +1,156 @@ +# elog +[![GoDoc](https://godoc.org/github.com/farshidtz/elog?status.svg)](https://godoc.org/github.com/farshidtz/elog) +[![Build Status](https://travis-ci.org/farshidtz/elog.svg?branch=master)](https://travis-ci.org/farshidtz/elog) + +elog extends Go's built-in [log](https://golang.org/pkg/log) package to enable simple levelled logging and to modify the formatting. + +## Debugging mode +The debugging mode can be enabled by [setting the configured environment variable](https://github.com/farshidtz/elog#setting-an-environment-variable) ("DEBUG" by default) to 1. Alternatively, the debugging can be enabled using a [flag](https://github.com/farshidtz/elog#enable-debugging-with-a-flag). + +## Usage +Get the package + + go get github.com/farshidtz/elog +Import +```go +import "github.com/farshidtz/elog" +``` + +Examples +```go +10 // Initialize with the default configuration +11 logger := elog.New("[main] ", nil) +12 +13 logger.Println("Hello world!") +14 logger.Debugln("Hello world!") +15 logger.Fatalln("Hello world!") +``` +Debugging not enabled +``` +2016/07/14 16:47:04 [main] Hello world! +2016/07/14 16:47:04 [main] Hello world! +exit with status 1 +``` +Debugging enabled +``` +2016/07/14 16:47:04 [main] main.go:13: Hello world! +2016/07/14 16:47:04 [debug] main.go:14 Hello world! +2016/07/14 16:47:04 [main] main.go:15 Hello world! +exit with status 1 +``` +### Error logging +```go +01 package main +02 +03 import "github.com/farshidtz/elog" +04 +05 var logger *elog.Logger +06 +07 func main() { +08 logger = elog.New("[main] ", nil) +09 +10 v, err := divide(10, 0) +11 if err != nil { +12 logger.Fatalln(err) +13 } +14 logger.Println(v) +15 } +16 +17 func divide(a, b int) (float64, error) { +18 if b == 0 { +19 return 0, logger.Errorf("Cannot divide by zero") +20 // The error is logged in debugging mode +21 } +22 return float64(a) / float64(b), nil +23 } +``` +Debugging not enabled +``` +2016/07/20 16:38:31 [main] main.go:12: Cannot divide by zero +``` +Debugging enabled +``` +2016/07/20 16:38:31 [debug] main.go:19: Cannot divide by zero +2016/07/20 16:38:31 [main] main.go:12: Cannot divide by zero +``` +### Configuration +```go +10 logger := elog.New("[I] ", &elog.Config{ +11 TimeFormat: time.RFC3339, +12 DebugPrefix: "[D] ", +13 }) +14 +15 logger.Println("Hello world!") +16 logger.Debugln("Hello world!") +17 logger.Fatalln("Hello world!") +``` +Debugging not enabled +``` +2016-07-14T16:57:15Z [I] Hello world! +2016-07-14T16:57:15Z [I] Hello world! +exit with status 1 +``` +Debugging enabled +``` +2016-07-14T16:57:15Z [I] main.go:15 Hello world! +2016-07-14T16:57:15Z [D] main.go:16 Hello world! +2016-07-14T16:57:15Z [I] main.go:17 Hello world! +exit with status 1 +``` +### Initialization with init() +Alternatively, a global logger can be instantiated in the init() function and used throughout the package. 
+```go +import "github.com/farshidtz/elog" + +var logger *elog.Logger + +func init() { + logger = elog.New("[main] ", nil) +} +``` + +### Enable debugging with a flag +```go +package main + +import ( + "github.com/farshidtz/elog" + "flag" +) + +var debugFlag = flag.Bool("d", false, "Enable debugging") +func main() { + flag.Parse() + logger := elog.New("[main] ", &elog.Config{ + DebugEnabled: debugFlag, + }) + + logger.Println("Hello World!") + logger.Debugln("Hello World!") +} +``` +Example (Windows PowerShell): +``` +PS C:\logging> .\main.exe +2016/07/20 15:55:31 [main] Hello World! + +PS C:\logging> .\main.exe -d +2016/07/20 15:55:32 [main] main.go:15: Hello World! +2016/07/20 15:55:32 [debug] main.go:16: Hello World! +``` + +### Setting an environment variable +``` +Unix: +export DEBUG=1 + +Command Prompt: +set DEBUG=1 + +PowerShell: +$env:DEBUG=1 +``` +## Documentation +For Go log's build-in methods: [golang.org/pkg/log](https://golang.org/pkg/log) + +For extended elog methods: [godoc.org/github.com/farshidtz/elog](https://godoc.org/github.com/farshidtz/elog) \ No newline at end of file diff --git a/vendor/github.com/farshidtz/elog/config.go b/vendor/github.com/farshidtz/elog/config.go new file mode 100644 index 00000000..29fd179c --- /dev/null +++ b/vendor/github.com/farshidtz/elog/config.go @@ -0,0 +1,99 @@ +package elog + +import ( + "fmt" + "io" + "log" + "os" + "strconv" + "strings" +) + +const ( + NoTrace = 1 // no file name or line lumber + LongFile = log.Llongfile // absolute file path and line number: /home/user/go/main.go:15 + ShortFile = log.Lshortfile // file name and line number: main.go:15 +) + +type Config struct { + // Writer is the destination to which log data is written + // Default: os.Stdout + Writer io.Writer + // TimeFormat is the format of time in the prefix (compatible with https://golang.org/pkg/time/#pkg-constants) + // Default: "2006/01/02 15:04:05" + TimeFormat string + // Trace is to control the tracing information + // Default: NoTrace + Trace int + // DebugEnabled can be used to pass a parsed command-line boolean flag to enable the debugging + // Default: nil + DebugEnabled *bool + // DebugEnvVar is the environment variable used to enable debugging when set to 1 + // Default: "DEBUG" + DebugEnvVar string + // DebugPrefix is the prefix used when logging with Debug methods and Errorf + // Default: "[debug] " + DebugPrefix string + // DebugTrace is to control the tracing information when in debugging mode + // Default: ShortFile + DebugTrace int +} + +func initConfig(config *Config) *Config { + var conf = &Config{ + // default configurations + Writer: os.Stdout, + TimeFormat: "2006/01/02 15:04:05", + Trace: NoTrace, + // debugging config + DebugEnvVar: "DEBUG", + DebugEnabled: nil, + DebugPrefix: "[debug] ", + DebugTrace: ShortFile, + } + + if config != nil { + if config.Writer != nil { + conf.Writer = config.Writer + } + if config.TimeFormat != "" { + conf.TimeFormat = config.TimeFormat + } + if config.Trace == NoTrace || config.Trace == 0 { + conf.Trace = 0 + } else { + conf.Trace = config.Trace + } + // debugging conf + if config.DebugEnabled != nil { + conf.DebugEnabled = config.DebugEnabled + } + if config.DebugEnvVar != "" { + conf.DebugEnvVar = config.DebugEnvVar + } + if config.DebugPrefix != "" { + conf.DebugPrefix = config.DebugPrefix + } + if config.DebugTrace == NoTrace { + conf.DebugTrace = 0 + } else if config.DebugTrace == 0 { + conf.DebugTrace = ShortFile // default + } else { + conf.DebugTrace = config.DebugTrace + + } + } + + 
conf.TimeFormat = fmt.Sprintf("%s ", strings.TrimSpace(conf.TimeFormat)) + if conf.DebugEnabled == nil { + var debug bool + // Enable debugging if environment variable is set + v, err := strconv.Atoi(os.Getenv(conf.DebugEnvVar)) + if err == nil && v == 1 { + debug = true + } + conf.DebugEnabled = &debug + } + + return conf +} diff --git a/vendor/github.com/farshidtz/elog/doc.go b/vendor/github.com/farshidtz/elog/doc.go new file mode 100644 index 00000000..24a67d3c --- /dev/null +++ b/vendor/github.com/farshidtz/elog/doc.go @@ -0,0 +1,30 @@ +/* + +Package elog extends Go's built-in log package to enable levelled logging with improved formatting. + +Methods + +Inherited from the log package (https://golang.org/pkg/log): + func (l *Logger) Print(v ...interface{}) + func (l *Logger) Printf(format string, v ...interface{}) + func (l *Logger) Println(v ...interface{}) + func (l *Logger) Output(calldepth int, s string) error + + func (l *Logger) Fatal(v ...interface{}) + func (l *Logger) Fatalf(format string, v ...interface{}) + func (l *Logger) Fatalln(v ...interface{}) + + func (l *Logger) Panic(v ...interface{}) + func (l *Logger) Panicf(format string, v ...interface{}) + func (l *Logger) Panicln(v ...interface{}) + +Extensions: + func (l *Logger) Debug(a ...interface{}) + func (l *Logger) Debugf(format string, a ...interface{}) + func (l *Logger) Debugln(a ...interface{}) + func (l *Logger) DebugOutput(calldepth int, s string) + + func (l *Logger) Errorf(format string, a ...interface{}) error + +*/ +package elog \ No newline at end of file diff --git a/vendor/github.com/farshidtz/elog/go.mod b/vendor/github.com/farshidtz/elog/go.mod new file mode 100644 index 00000000..1a6b6685 --- /dev/null +++ b/vendor/github.com/farshidtz/elog/go.mod @@ -0,0 +1,3 @@ +module github.com/farshidtz/elog + +go 1.12 diff --git a/vendor/github.com/farshidtz/elog/logger.go b/vendor/github.com/farshidtz/elog/logger.go new file mode 100644 index 00000000..da038032 --- /dev/null +++ b/vendor/github.com/farshidtz/elog/logger.go @@ -0,0 +1,97 @@ +package elog + +import ( + "fmt" + "log" +) + +type Logger struct { + *log.Logger + debug *log.Logger +} + +/* +New creates a new Logger. +The prefix variable is the prefix used when logging with Print, Fatal, and Panic methods. +The config variable is a struct with optional fields. + +When config is set to nil, the default configuration will be used. +*/ +func New(prefix string, config *Config) *Logger { + conf := initConfig(config) + + var logger Logger + if *conf.DebugEnabled { + logger.Logger = log.New(&writer{conf.Writer, conf.TimeFormat}, prefix, conf.DebugTrace) + logger.debug = log.New(&writer{conf.Writer, conf.TimeFormat}, conf.DebugPrefix, conf.DebugTrace) + } else { + logger.Logger = log.New(&writer{conf.Writer, conf.TimeFormat}, prefix, conf.Trace) + } + return &logger +} + +// Debug has similar arguments to those of fmt.Print. It prints to the logger only when debugging is enabled. +func (l *Logger) Debug(a ...interface{}) { + if l.debug != nil { + l.debug.Output(2, fmt.Sprint(a...)) + } +} + +// Debugf has similar arguments to those of fmt.Printf. It prints to the logger only when debugging is enabled. +func (l *Logger) Debugf(format string, a ...interface{}) { + if l.debug != nil { + l.debug.Output(2, fmt.Sprintf(format, a...)) + } +} + +// Debugln has similar arguments to those of fmt.Println. It prints to the logger only when debugging is enabled. 
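The net effect of initConfig and the two underlying log.Logger instances is that the Debug* methods are silent no-ops unless debugging was enabled, either through the DEBUG environment variable or an explicit DebugEnabled flag. A small sketch that captures output in a buffer to show the difference; the run helper is hypothetical, and the exact timestamps and file:line values will vary:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/farshidtz/elog"
)

func run(debug bool) string {
	var buf bytes.Buffer
	logger := elog.New("[main] ", &elog.Config{
		Writer:       &buf,    // capture output instead of writing to stdout
		DebugEnabled: &debug,  // overrides the DEBUG environment variable
	})
	logger.Println("hello")
	logger.Debugln("details") // only written when debug == true
	return buf.String()
}

func main() {
	fmt.Print(run(false)) // one line:  "<time> [main] hello"
	fmt.Print(run(true))  // two lines; the second is prefixed with "[debug] "
}
```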
+func (l *Logger) Debugln(a ...interface{}) { + if l.debug != nil { + l.debug.Output(2, fmt.Sprintln(a...)) + } +} + +/* +DebugOutput is similar to Debug but allows control over the scope of tracing. (log.Output for debugging) + +E.g. : + [main.go] + + 10 apiError("error message") + 11 + 12 // A function to log formatted API errors + 13 func apiError(s string){ + 14 logger.DebugOutput(2, "API Error:", s) + 15 } + + Prints: 2016/07/19 17:34:10 [debug] main.go:10: API Error: error message +*/ +func (l *Logger) DebugOutput(calldepth int, s string) { + if l.debug != nil { + l.debug.Output(calldepth+1, s) + } +} + +/* +Errorf has similar arguments and signature to those of fmt.Errorf. In addition to formatting and returning error, Errorf prints the error message to the logger when debugging is enabled. + +E.g. : + [example.go] + + 19 func DoSomething() error { + 20 err, v := divide(10, 0) + 21 if err != nil { + 22 return logger.Errorf("Division error: %s", err) + 23 } + 24 + 25 return nil + 24 } + + Prints: 2016/07/19 17:44:22 [debug] example.go:22: Division error: cannot divide by zero +*/ +func (l *Logger) Errorf(format string, a ...interface{}) error { + if l.debug != nil { + l.debug.Output(2, fmt.Sprintf(format, a...)) + } + return fmt.Errorf(format, a...) +} diff --git a/vendor/github.com/farshidtz/elog/writer.go b/vendor/github.com/farshidtz/elog/writer.go new file mode 100644 index 00000000..03e0c442 --- /dev/null +++ b/vendor/github.com/farshidtz/elog/writer.go @@ -0,0 +1,33 @@ +package elog + +import ( + "io" + "time" +) + +type writer struct { + io.Writer + timeFormat string +} + +func (w writer) Write(b []byte) (n int, err error) { + return w.Writer.Write(append([]byte(time.Now().Format(w.timeFormat)), b...)) +} + +/* +NewWriter returns a new io.Writer that writes timestamps as prefix + +E.g. usage: + + logger := log.New(elog.NewWriter(os.Stdout), "[info] ", 0) + logger.Println("Hello.") + + Prints: 2016/07/19 17:34:10 [info] Hello. +*/ +func NewWriter(w io.Writer, timeFormat ...string) *writer { + tf := "2006-01-02 15:04:05 " + if len(timeFormat) == 1 { + tf = timeFormat[0] + } + return &writer{w, tf} +} diff --git a/vendor/github.com/farshidtz/mqtt-match/.travis.yml b/vendor/github.com/farshidtz/mqtt-match/.travis.yml new file mode 100644 index 00000000..e50fcc14 --- /dev/null +++ b/vendor/github.com/farshidtz/mqtt-match/.travis.yml @@ -0,0 +1,4 @@ +language: go + +go: + - 1.x diff --git a/vendor/github.com/farshidtz/mqtt-match/LICENSE b/vendor/github.com/farshidtz/mqtt-match/LICENSE new file mode 100644 index 00000000..ef527e9e --- /dev/null +++ b/vendor/github.com/farshidtz/mqtt-match/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Farshid Tavakolizadeh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/farshidtz/mqtt-match/README.md b/vendor/github.com/farshidtz/mqtt-match/README.md new file mode 100644 index 00000000..86c88bc9 --- /dev/null +++ b/vendor/github.com/farshidtz/mqtt-match/README.md @@ -0,0 +1,21 @@ +# mqtt-match [![GoDoc](https://godoc.org/github.com/farshidtz/mqtt-match?status.svg)](https://godoc.org/github.com/farshidtz/mqtt-match) [![Build Status](https://travis-ci.org/farshidtz/mqtt-match.svg?branch=master)](https://travis-ci.org/farshidtz/mqtt-match) + +Match mqtt formatted topic strings to strings, e.g. `foo/+` should match `foo/bar`. + +### Usage + +```go +package main + +import ( + "fmt" + mqtttopic "github.com/farshidtz/mqtt-match" +) + +func main(){ + fmt.Println(mqtttopic.Match("foo/+", "foo/bar")) + // true +} +``` +### Copyrights Notice +This package is a translation of [mqtt-match](https://github.com/ralphtheninja/mqtt-match) for Go. diff --git a/vendor/github.com/farshidtz/mqtt-match/match.go b/vendor/github.com/farshidtz/mqtt-match/match.go new file mode 100644 index 00000000..07bdeccb --- /dev/null +++ b/vendor/github.com/farshidtz/mqtt-match/match.go @@ -0,0 +1,22 @@ +package mqttmatch + +import "strings" + +// Match checks whether the filter and topic match +func Match(filter, topic string) bool { + filterArray := strings.Split(filter, "/") + topicArray := strings.Split(topic, "/") + for i := 0; i < len(filterArray); i++ { + if i >= len(topicArray) { + return false + } + if filterArray[i] == "#" { + return true + } + if filterArray[i] != "+" && filterArray[i] != topicArray[i] { + return false + } + } + + return len(filterArray) == len(topicArray) +} diff --git a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp b/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp deleted file mode 100644 index fc31f517..00000000 --- a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp +++ /dev/null @@ -1,77 +0,0 @@ -/* -To build the snappytool binary: -g++ main.cpp /usr/lib/libsnappy.a -o snappytool -or, if you have built the C++ snappy library from source: -g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool -after running "make" from your snappy checkout directory. -*/ - -#include -#include -#include -#include - -#include "snappy.h" - -#define N 1000000 - -char dst[N]; -char src[N]; - -int main(int argc, char** argv) { - // Parse args. - if (argc != 2) { - fprintf(stderr, "exactly one of -d or -e must be given\n"); - return 1; - } - bool decode = strcmp(argv[1], "-d") == 0; - bool encode = strcmp(argv[1], "-e") == 0; - if (decode == encode) { - fprintf(stderr, "exactly one of -d or -e must be given\n"); - return 1; - } - - // Read all of stdin into src[:s]. - size_t s = 0; - while (1) { - if (s == N) { - fprintf(stderr, "input too large\n"); - return 1; - } - ssize_t n = read(0, src+s, N-s); - if (n == 0) { - break; - } - if (n < 0) { - fprintf(stderr, "read error: %s\n", strerror(errno)); - // TODO: handle EAGAIN, EINTR? - return 1; - } - s += n; - } - - // Encode or decode src[:s] to dst[:d], and write to stdout. 
- size_t d = 0; - if (encode) { - if (N < snappy::MaxCompressedLength(s)) { - fprintf(stderr, "input too large after encoding\n"); - return 1; - } - snappy::RawCompress(src, s, dst, &d); - } else { - if (!snappy::GetUncompressedLength(src, s, &d)) { - fprintf(stderr, "could not get uncompressed length\n"); - return 1; - } - if (N < d) { - fprintf(stderr, "input too large after decoding\n"); - return 1; - } - if (!snappy::RawUncompress(src, s, dst)) { - fprintf(stderr, "input was not valid Snappy-compressed data\n"); - return 1; - } - } - write(1, dst, d); - return 0; -} diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go deleted file mode 100644 index e4496f92..00000000 --- a/vendor/github.com/golang/snappy/golden_test.go +++ /dev/null @@ -1,1965 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -// extendMatchGoldenTestCases is the i and j arguments, and the returned value, -// for every extendMatch call issued when encoding the -// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the -// extendMatch implementation. -// -// It was generated manually by adding some print statements to the (pure Go) -// extendMatch implementation: -// -// func extendMatch(src []byte, i, j int) int { -// i0, j0 := i, j -// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { -// } -// println("{", i0, ",", j0, ",", j, "},") -// return j -// } -// -// and running "go test -test.run=EncodeGoldenInput -tags=noasm". -var extendMatchGoldenTestCases = []struct { - i, j, want int -}{ - {11, 61, 62}, - {80, 81, 82}, - {86, 87, 101}, - {85, 133, 149}, - {152, 153, 162}, - {133, 168, 193}, - {168, 207, 225}, - {81, 255, 275}, - {278, 279, 283}, - {306, 417, 417}, - {373, 428, 430}, - {389, 444, 447}, - {474, 510, 512}, - {465, 533, 533}, - {47, 547, 547}, - {307, 551, 554}, - {420, 582, 587}, - {309, 604, 604}, - {604, 625, 625}, - {538, 629, 629}, - {328, 640, 640}, - {573, 645, 645}, - {319, 657, 657}, - {30, 664, 664}, - {45, 679, 680}, - {621, 684, 684}, - {376, 700, 700}, - {33, 707, 708}, - {601, 733, 733}, - {334, 744, 745}, - {625, 758, 759}, - {382, 763, 763}, - {550, 769, 771}, - {533, 789, 789}, - {804, 813, 813}, - {342, 841, 842}, - {742, 847, 847}, - {74, 852, 852}, - {810, 864, 864}, - {758, 868, 869}, - {714, 883, 883}, - {582, 889, 891}, - {61, 934, 935}, - {894, 942, 942}, - {939, 949, 949}, - {785, 956, 957}, - {886, 978, 978}, - {792, 998, 998}, - {998, 1005, 1005}, - {572, 1032, 1032}, - {698, 1051, 1053}, - {599, 1067, 1069}, - {1056, 1079, 1079}, - {942, 1089, 1090}, - {831, 1094, 1096}, - {1088, 1100, 1103}, - {732, 1113, 1114}, - {1037, 1118, 1118}, - {872, 1128, 1130}, - {1079, 1140, 1142}, - {332, 1162, 1162}, - {207, 1168, 1186}, - {1189, 1190, 1225}, - {105, 1229, 1230}, - {79, 1256, 1257}, - {1190, 1261, 1283}, - {255, 1306, 1306}, - {1319, 1339, 1358}, - {364, 1370, 1370}, - {955, 1378, 1380}, - {122, 1403, 1403}, - {1325, 1407, 1419}, - {664, 1423, 1424}, - {941, 1461, 1463}, - {867, 1477, 1478}, - {757, 1488, 1489}, - {1140, 1499, 1499}, - {31, 1506, 1506}, - {1487, 1510, 1512}, - {1089, 1520, 1521}, - {1467, 1525, 1529}, - {1394, 1537, 1537}, - {1499, 1541, 1541}, - {367, 1558, 1558}, - {1475, 1564, 1564}, - {1525, 1568, 1571}, - {1541, 1582, 1583}, - {864, 1587, 1588}, - {704, 1597, 1597}, - {336, 1602, 1602}, - {1383, 1613, 1613}, - {1498, 1617, 1618}, - {1051, 
1623, 1625}, - {401, 1643, 1645}, - {1072, 1654, 1655}, - {1067, 1667, 1669}, - {699, 1673, 1674}, - {1587, 1683, 1684}, - {920, 1696, 1696}, - {1505, 1710, 1710}, - {1550, 1723, 1723}, - {996, 1727, 1727}, - {833, 1733, 1734}, - {1638, 1739, 1740}, - {1654, 1744, 1744}, - {753, 1761, 1761}, - {1548, 1773, 1773}, - {1568, 1777, 1780}, - {1683, 1793, 1794}, - {948, 1801, 1801}, - {1666, 1805, 1808}, - {1502, 1814, 1814}, - {1696, 1822, 1822}, - {502, 1836, 1837}, - {917, 1843, 1843}, - {1733, 1854, 1855}, - {970, 1859, 1859}, - {310, 1863, 1863}, - {657, 1872, 1872}, - {1005, 1876, 1876}, - {1662, 1880, 1880}, - {904, 1892, 1892}, - {1427, 1910, 1910}, - {1772, 1929, 1930}, - {1822, 1937, 1940}, - {1858, 1949, 1950}, - {1602, 1956, 1956}, - {1150, 1962, 1962}, - {1504, 1966, 1967}, - {51, 1971, 1971}, - {1605, 1979, 1979}, - {1458, 1983, 1988}, - {1536, 2001, 2006}, - {1373, 2014, 2018}, - {1494, 2025, 2025}, - {1667, 2029, 2031}, - {1592, 2035, 2035}, - {330, 2045, 2045}, - {1376, 2053, 2053}, - {1991, 2058, 2059}, - {1635, 2065, 2065}, - {1992, 2073, 2074}, - {2014, 2080, 2081}, - {1546, 2085, 2087}, - {59, 2099, 2099}, - {1996, 2106, 2106}, - {1836, 2110, 2110}, - {2068, 2114, 2114}, - {1338, 2122, 2122}, - {1562, 2128, 2130}, - {1934, 2134, 2134}, - {2114, 2141, 2142}, - {977, 2149, 2150}, - {956, 2154, 2155}, - {1407, 2162, 2162}, - {1773, 2166, 2166}, - {883, 2171, 2171}, - {623, 2175, 2178}, - {1520, 2191, 2192}, - {1162, 2200, 2200}, - {912, 2204, 2204}, - {733, 2208, 2208}, - {1777, 2212, 2215}, - {1532, 2219, 2219}, - {718, 2223, 2225}, - {2069, 2229, 2229}, - {2207, 2245, 2246}, - {1139, 2264, 2264}, - {677, 2274, 2274}, - {2099, 2279, 2279}, - {1863, 2283, 2283}, - {1966, 2305, 2306}, - {2279, 2313, 2313}, - {1628, 2319, 2319}, - {755, 2329, 2329}, - {1461, 2334, 2334}, - {2117, 2340, 2340}, - {2313, 2349, 2349}, - {1859, 2353, 2353}, - {1048, 2362, 2362}, - {895, 2366, 2366}, - {2278, 2373, 2373}, - {1884, 2377, 2377}, - {1402, 2387, 2392}, - {700, 2398, 2398}, - {1971, 2402, 2402}, - {2009, 2419, 2419}, - {1441, 2426, 2428}, - {2208, 2432, 2432}, - {2038, 2436, 2436}, - {932, 2443, 2443}, - {1759, 2447, 2448}, - {744, 2452, 2452}, - {1875, 2458, 2458}, - {2405, 2468, 2468}, - {1596, 2472, 2473}, - {1953, 2480, 2482}, - {736, 2487, 2487}, - {1913, 2493, 2493}, - {774, 2497, 2497}, - {1484, 2506, 2508}, - {2432, 2512, 2512}, - {752, 2519, 2519}, - {2497, 2523, 2523}, - {2409, 2528, 2529}, - {2122, 2533, 2533}, - {2396, 2537, 2538}, - {2410, 2547, 2548}, - {1093, 2555, 2560}, - {551, 2564, 2565}, - {2268, 2569, 2569}, - {1362, 2580, 2580}, - {1916, 2584, 2585}, - {994, 2589, 2590}, - {1979, 2596, 2596}, - {1041, 2602, 2602}, - {2104, 2614, 2616}, - {2609, 2621, 2628}, - {2329, 2638, 2638}, - {2211, 2657, 2658}, - {2638, 2662, 2667}, - {2578, 2676, 2679}, - {2153, 2685, 2686}, - {2608, 2696, 2697}, - {598, 2712, 2712}, - {2620, 2719, 2720}, - {1888, 2724, 2728}, - {2709, 2732, 2732}, - {1365, 2739, 2739}, - {784, 2747, 2748}, - {424, 2753, 2753}, - {2204, 2759, 2759}, - {812, 2768, 2769}, - {2455, 2773, 2773}, - {1722, 2781, 2781}, - {1917, 2792, 2792}, - {2705, 2799, 2799}, - {2685, 2806, 2807}, - {2742, 2811, 2811}, - {1370, 2818, 2818}, - {2641, 2830, 2830}, - {2512, 2837, 2837}, - {2457, 2841, 2841}, - {2756, 2845, 2845}, - {2719, 2855, 2855}, - {1423, 2859, 2859}, - {2849, 2863, 2865}, - {1474, 2871, 2871}, - {1161, 2875, 2876}, - {2282, 2880, 2881}, - {2746, 2888, 2888}, - {1783, 2893, 2893}, - {2401, 2899, 2900}, - {2632, 2920, 2923}, - {2422, 2928, 2930}, - {2715, 2939, 
2939}, - {2162, 2943, 2943}, - {2859, 2947, 2947}, - {1910, 2951, 2951}, - {1431, 2955, 2956}, - {1439, 2964, 2964}, - {2501, 2968, 2969}, - {2029, 2973, 2976}, - {689, 2983, 2984}, - {1658, 2988, 2988}, - {1031, 2996, 2996}, - {2149, 3001, 3002}, - {25, 3009, 3013}, - {2964, 3023, 3023}, - {953, 3027, 3028}, - {2359, 3036, 3036}, - {3023, 3049, 3049}, - {2880, 3055, 3056}, - {2973, 3076, 3077}, - {2874, 3090, 3090}, - {2871, 3094, 3094}, - {2532, 3100, 3100}, - {2938, 3107, 3108}, - {350, 3115, 3115}, - {2196, 3119, 3121}, - {1133, 3127, 3129}, - {1797, 3134, 3150}, - {3032, 3158, 3158}, - {3016, 3172, 3172}, - {2533, 3179, 3179}, - {3055, 3187, 3188}, - {1384, 3192, 3193}, - {2799, 3199, 3199}, - {2126, 3203, 3207}, - {2334, 3215, 3215}, - {2105, 3220, 3221}, - {3199, 3229, 3229}, - {2891, 3233, 3233}, - {855, 3240, 3240}, - {1852, 3253, 3256}, - {2140, 3263, 3263}, - {1682, 3268, 3270}, - {3243, 3274, 3274}, - {924, 3279, 3279}, - {2212, 3283, 3283}, - {2596, 3287, 3287}, - {2999, 3291, 3291}, - {2353, 3295, 3295}, - {2480, 3302, 3304}, - {1959, 3308, 3311}, - {3000, 3318, 3318}, - {845, 3330, 3330}, - {2283, 3334, 3334}, - {2519, 3342, 3342}, - {3325, 3346, 3348}, - {2397, 3353, 3354}, - {2763, 3358, 3358}, - {3198, 3363, 3364}, - {3211, 3368, 3372}, - {2950, 3376, 3377}, - {3245, 3388, 3391}, - {2264, 3398, 3398}, - {795, 3403, 3403}, - {3287, 3407, 3407}, - {3358, 3411, 3411}, - {3317, 3415, 3415}, - {3232, 3431, 3431}, - {2128, 3435, 3437}, - {3236, 3441, 3441}, - {3398, 3445, 3446}, - {2814, 3450, 3450}, - {3394, 3466, 3466}, - {2425, 3470, 3470}, - {3330, 3476, 3476}, - {1612, 3480, 3480}, - {1004, 3485, 3486}, - {2732, 3490, 3490}, - {1117, 3494, 3495}, - {629, 3501, 3501}, - {3087, 3514, 3514}, - {684, 3518, 3518}, - {3489, 3522, 3524}, - {1760, 3529, 3529}, - {617, 3537, 3537}, - {3431, 3541, 3541}, - {997, 3547, 3547}, - {882, 3552, 3553}, - {2419, 3558, 3558}, - {610, 3562, 3563}, - {1903, 3567, 3569}, - {3005, 3575, 3575}, - {3076, 3585, 3586}, - {3541, 3590, 3590}, - {3490, 3594, 3594}, - {1899, 3599, 3599}, - {3545, 3606, 3606}, - {3290, 3614, 3615}, - {2056, 3619, 3620}, - {3556, 3625, 3625}, - {3294, 3632, 3633}, - {637, 3643, 3644}, - {3609, 3648, 3650}, - {3175, 3658, 3658}, - {3498, 3665, 3665}, - {1597, 3669, 3669}, - {1983, 3673, 3673}, - {3215, 3682, 3682}, - {3544, 3689, 3689}, - {3694, 3698, 3698}, - {3228, 3715, 3716}, - {2594, 3720, 3722}, - {3573, 3726, 3726}, - {2479, 3732, 3735}, - {3191, 3741, 3742}, - {1113, 3746, 3747}, - {2844, 3751, 3751}, - {3445, 3756, 3757}, - {3755, 3766, 3766}, - {3421, 3775, 3780}, - {3593, 3784, 3786}, - {3263, 3796, 3796}, - {3469, 3806, 3806}, - {2602, 3815, 3815}, - {723, 3819, 3821}, - {1608, 3826, 3826}, - {3334, 3830, 3830}, - {2198, 3835, 3835}, - {2635, 3840, 3840}, - {3702, 3852, 3853}, - {3406, 3858, 3859}, - {3681, 3867, 3870}, - {3407, 3880, 3880}, - {340, 3889, 3889}, - {3772, 3893, 3893}, - {593, 3897, 3897}, - {2563, 3914, 3916}, - {2981, 3929, 3929}, - {1835, 3933, 3934}, - {3906, 3951, 3951}, - {1459, 3958, 3958}, - {3889, 3974, 3974}, - {2188, 3982, 3982}, - {3220, 3986, 3987}, - {3585, 3991, 3993}, - {3712, 3997, 4001}, - {2805, 4007, 4007}, - {1879, 4012, 4013}, - {3618, 4018, 4018}, - {1145, 4031, 4032}, - {3901, 4037, 4037}, - {2772, 4046, 4047}, - {2802, 4053, 4054}, - {3299, 4058, 4058}, - {3725, 4066, 4066}, - {2271, 4070, 4070}, - {385, 4075, 4076}, - {3624, 4089, 4090}, - {3745, 4096, 4098}, - {1563, 4102, 4102}, - {4045, 4106, 4111}, - {3696, 4115, 4119}, - {3376, 4125, 4126}, - {1880, 4130, 4130}, - 
{2048, 4140, 4141}, - {2724, 4149, 4149}, - {1767, 4156, 4156}, - {2601, 4164, 4164}, - {2757, 4168, 4168}, - {3974, 4172, 4172}, - {3914, 4178, 4178}, - {516, 4185, 4185}, - {1032, 4189, 4190}, - {3462, 4197, 4198}, - {3805, 4202, 4203}, - {3910, 4207, 4212}, - {3075, 4221, 4221}, - {3756, 4225, 4226}, - {1872, 4236, 4237}, - {3844, 4241, 4241}, - {3991, 4245, 4249}, - {2203, 4258, 4258}, - {3903, 4267, 4268}, - {705, 4272, 4272}, - {1896, 4276, 4276}, - {1955, 4285, 4288}, - {3746, 4302, 4303}, - {2672, 4311, 4311}, - {3969, 4317, 4317}, - {3883, 4322, 4322}, - {1920, 4339, 4340}, - {3527, 4344, 4346}, - {1160, 4358, 4358}, - {3648, 4364, 4366}, - {2711, 4387, 4387}, - {3619, 4391, 4392}, - {1944, 4396, 4396}, - {4369, 4400, 4400}, - {2736, 4404, 4407}, - {2546, 4411, 4412}, - {4390, 4422, 4422}, - {3610, 4426, 4427}, - {4058, 4431, 4431}, - {4374, 4435, 4435}, - {3463, 4445, 4446}, - {1813, 4452, 4452}, - {3669, 4456, 4456}, - {3830, 4460, 4460}, - {421, 4464, 4465}, - {1719, 4471, 4471}, - {3880, 4475, 4475}, - {1834, 4485, 4487}, - {3590, 4491, 4491}, - {442, 4496, 4497}, - {4435, 4501, 4501}, - {3814, 4509, 4509}, - {987, 4513, 4513}, - {4494, 4518, 4521}, - {3218, 4526, 4529}, - {4221, 4537, 4537}, - {2778, 4543, 4545}, - {4422, 4552, 4552}, - {4031, 4558, 4559}, - {4178, 4563, 4563}, - {3726, 4567, 4574}, - {4027, 4578, 4578}, - {4339, 4585, 4587}, - {3796, 4592, 4595}, - {543, 4600, 4613}, - {2855, 4620, 4621}, - {2795, 4627, 4627}, - {3440, 4631, 4632}, - {4279, 4636, 4639}, - {4245, 4643, 4645}, - {4516, 4649, 4650}, - {3133, 4654, 4654}, - {4042, 4658, 4659}, - {3422, 4663, 4663}, - {4046, 4667, 4668}, - {4267, 4672, 4672}, - {4004, 4676, 4677}, - {2490, 4682, 4682}, - {2451, 4697, 4697}, - {3027, 4705, 4705}, - {4028, 4717, 4717}, - {4460, 4721, 4721}, - {2471, 4725, 4727}, - {3090, 4735, 4735}, - {3192, 4739, 4740}, - {3835, 4760, 4760}, - {4540, 4764, 4764}, - {4007, 4772, 4774}, - {619, 4784, 4784}, - {3561, 4789, 4791}, - {3367, 4805, 4805}, - {4490, 4810, 4811}, - {2402, 4815, 4815}, - {3352, 4819, 4822}, - {2773, 4828, 4828}, - {4552, 4832, 4832}, - {2522, 4840, 4841}, - {316, 4847, 4852}, - {4715, 4858, 4858}, - {2959, 4862, 4862}, - {4858, 4868, 4869}, - {2134, 4873, 4873}, - {578, 4878, 4878}, - {4189, 4889, 4890}, - {2229, 4894, 4894}, - {4501, 4898, 4898}, - {2297, 4903, 4903}, - {2933, 4909, 4909}, - {3008, 4913, 4913}, - {3153, 4917, 4917}, - {4819, 4921, 4921}, - {4921, 4932, 4933}, - {4920, 4944, 4945}, - {4814, 4954, 4955}, - {576, 4966, 4966}, - {1854, 4970, 4971}, - {1374, 4975, 4976}, - {3307, 4980, 4980}, - {974, 4984, 4988}, - {4721, 4992, 4992}, - {4898, 4996, 4996}, - {4475, 5006, 5006}, - {3819, 5012, 5012}, - {1948, 5019, 5021}, - {4954, 5027, 5029}, - {3740, 5038, 5040}, - {4763, 5044, 5045}, - {1936, 5051, 5051}, - {4844, 5055, 5060}, - {4215, 5069, 5072}, - {1146, 5076, 5076}, - {3845, 5082, 5082}, - {4865, 5090, 5090}, - {4624, 5094, 5094}, - {4815, 5098, 5098}, - {5006, 5105, 5105}, - {4980, 5109, 5109}, - {4795, 5113, 5115}, - {5043, 5119, 5121}, - {4782, 5129, 5129}, - {3826, 5139, 5139}, - {3876, 5156, 5156}, - {3111, 5167, 5171}, - {1470, 5177, 5177}, - {4431, 5181, 5181}, - {546, 5189, 5189}, - {4225, 5193, 5193}, - {1672, 5199, 5201}, - {4207, 5205, 5209}, - {4220, 5216, 5217}, - {4658, 5224, 5225}, - {3295, 5235, 5235}, - {2436, 5239, 5239}, - {2349, 5246, 5246}, - {2175, 5250, 5250}, - {5180, 5257, 5258}, - {3161, 5263, 5263}, - {5105, 5272, 5272}, - {3552, 5282, 5282}, - {4944, 5299, 5300}, - {4130, 5312, 5313}, - {902, 5323, 5323}, - 
{913, 5327, 5327}, - {2987, 5333, 5334}, - {5150, 5344, 5344}, - {5249, 5348, 5348}, - {1965, 5358, 5359}, - {5330, 5364, 5364}, - {2012, 5373, 5377}, - {712, 5384, 5386}, - {5235, 5390, 5390}, - {5044, 5398, 5399}, - {564, 5406, 5406}, - {39, 5410, 5410}, - {4642, 5422, 5425}, - {4421, 5437, 5438}, - {2347, 5449, 5449}, - {5333, 5453, 5454}, - {4136, 5458, 5459}, - {3793, 5468, 5468}, - {2243, 5480, 5480}, - {4889, 5492, 5493}, - {4295, 5504, 5504}, - {2785, 5511, 5511}, - {2377, 5518, 5518}, - {3662, 5525, 5525}, - {5097, 5529, 5530}, - {4781, 5537, 5538}, - {4697, 5547, 5548}, - {436, 5552, 5553}, - {5542, 5558, 5558}, - {3692, 5562, 5562}, - {2696, 5568, 5569}, - {4620, 5578, 5578}, - {2898, 5590, 5590}, - {5557, 5596, 5618}, - {2797, 5623, 5625}, - {2792, 5629, 5629}, - {5243, 5633, 5633}, - {5348, 5637, 5637}, - {5547, 5643, 5643}, - {4296, 5654, 5655}, - {5568, 5662, 5662}, - {3001, 5670, 5671}, - {3794, 5679, 5679}, - {4006, 5685, 5686}, - {4969, 5690, 5692}, - {687, 5704, 5704}, - {4563, 5708, 5708}, - {1723, 5738, 5738}, - {649, 5742, 5742}, - {5163, 5748, 5755}, - {3907, 5759, 5759}, - {3074, 5764, 5764}, - {5326, 5771, 5771}, - {2951, 5776, 5776}, - {5181, 5780, 5780}, - {2614, 5785, 5788}, - {4709, 5794, 5794}, - {2784, 5799, 5799}, - {5518, 5803, 5803}, - {4155, 5812, 5815}, - {921, 5819, 5819}, - {5224, 5823, 5824}, - {2853, 5830, 5836}, - {5776, 5840, 5840}, - {2955, 5844, 5845}, - {5745, 5853, 5853}, - {3291, 5857, 5857}, - {2988, 5861, 5861}, - {2647, 5865, 5865}, - {5398, 5869, 5870}, - {1085, 5874, 5875}, - {4906, 5881, 5881}, - {802, 5886, 5886}, - {5119, 5890, 5893}, - {5802, 5899, 5900}, - {3415, 5904, 5904}, - {5629, 5908, 5908}, - {3714, 5912, 5914}, - {5558, 5921, 5921}, - {2710, 5927, 5928}, - {1094, 5932, 5934}, - {2653, 5940, 5941}, - {4735, 5954, 5954}, - {5861, 5958, 5958}, - {1040, 5971, 5971}, - {5514, 5977, 5977}, - {5048, 5981, 5982}, - {5953, 5992, 5993}, - {3751, 5997, 5997}, - {4991, 6001, 6002}, - {5885, 6006, 6007}, - {5529, 6011, 6012}, - {4974, 6019, 6020}, - {5857, 6024, 6024}, - {3483, 6032, 6032}, - {3594, 6036, 6036}, - {1997, 6040, 6040}, - {5997, 6044, 6047}, - {5197, 6051, 6051}, - {1764, 6055, 6055}, - {6050, 6059, 6059}, - {5239, 6063, 6063}, - {5049, 6067, 6067}, - {5957, 6073, 6074}, - {1022, 6078, 6078}, - {3414, 6083, 6084}, - {3809, 6090, 6090}, - {4562, 6095, 6096}, - {5878, 6104, 6104}, - {594, 6108, 6109}, - {3353, 6115, 6116}, - {4992, 6120, 6121}, - {2424, 6125, 6125}, - {4484, 6130, 6130}, - {3900, 6134, 6135}, - {5793, 6139, 6141}, - {3562, 6145, 6145}, - {1438, 6152, 6153}, - {6058, 6157, 6158}, - {4411, 6162, 6163}, - {4590, 6167, 6171}, - {4748, 6175, 6175}, - {5517, 6183, 6184}, - {6095, 6191, 6192}, - {1471, 6203, 6203}, - {2643, 6209, 6210}, - {450, 6220, 6220}, - {5266, 6226, 6226}, - {2576, 6233, 6233}, - {2607, 6239, 6240}, - {5164, 6244, 6251}, - {6054, 6255, 6255}, - {1789, 6260, 6261}, - {5250, 6265, 6265}, - {6062, 6273, 6278}, - {5990, 6282, 6282}, - {3283, 6286, 6286}, - {5436, 6290, 6290}, - {6059, 6294, 6294}, - {5668, 6298, 6300}, - {3072, 6324, 6329}, - {3132, 6338, 6339}, - {3246, 6343, 6344}, - {28, 6348, 6349}, - {1503, 6353, 6355}, - {6067, 6359, 6359}, - {3384, 6364, 6364}, - {545, 6375, 6376}, - {5803, 6380, 6380}, - {5522, 6384, 6385}, - {5908, 6389, 6389}, - {2796, 6393, 6396}, - {4831, 6403, 6404}, - {6388, 6412, 6412}, - {6005, 6417, 6420}, - {4450, 6430, 6430}, - {4050, 6435, 6435}, - {5372, 6441, 6441}, - {4378, 6447, 6447}, - {6199, 6452, 6452}, - {3026, 6456, 6456}, - {2642, 6460, 6462}, - 
{6392, 6470, 6470}, - {6459, 6474, 6474}, - {2829, 6487, 6488}, - {2942, 6499, 6504}, - {5069, 6508, 6511}, - {5341, 6515, 6516}, - {5853, 6521, 6525}, - {6104, 6531, 6531}, - {5759, 6535, 6538}, - {4672, 6542, 6543}, - {2443, 6550, 6550}, - {5109, 6554, 6554}, - {6494, 6558, 6560}, - {6006, 6570, 6572}, - {6424, 6576, 6580}, - {4693, 6591, 6592}, - {6439, 6596, 6597}, - {3179, 6601, 6601}, - {5299, 6606, 6607}, - {4148, 6612, 6613}, - {3774, 6617, 6617}, - {3537, 6623, 6624}, - {4975, 6628, 6629}, - {3848, 6636, 6636}, - {856, 6640, 6640}, - {5724, 6645, 6645}, - {6632, 6651, 6651}, - {4630, 6656, 6658}, - {1440, 6662, 6662}, - {4281, 6666, 6667}, - {4302, 6671, 6672}, - {2589, 6676, 6677}, - {5647, 6681, 6687}, - {6082, 6691, 6693}, - {6144, 6698, 6698}, - {6103, 6709, 6710}, - {3710, 6714, 6714}, - {4253, 6718, 6721}, - {2467, 6730, 6730}, - {4778, 6734, 6734}, - {6528, 6738, 6738}, - {4358, 6747, 6747}, - {5889, 6753, 6753}, - {5193, 6757, 6757}, - {5797, 6761, 6761}, - {3858, 6765, 6766}, - {5951, 6776, 6776}, - {6487, 6781, 6782}, - {3282, 6786, 6787}, - {4667, 6797, 6799}, - {1927, 6803, 6806}, - {6583, 6810, 6810}, - {4937, 6814, 6814}, - {6099, 6824, 6824}, - {4415, 6835, 6836}, - {6332, 6840, 6841}, - {5160, 6850, 6850}, - {4764, 6854, 6854}, - {6814, 6858, 6859}, - {3018, 6864, 6864}, - {6293, 6868, 6869}, - {6359, 6877, 6877}, - {3047, 6884, 6886}, - {5262, 6890, 6891}, - {5471, 6900, 6900}, - {3268, 6910, 6912}, - {1047, 6916, 6916}, - {5904, 6923, 6923}, - {5798, 6933, 6938}, - {4149, 6942, 6942}, - {1821, 6946, 6946}, - {3599, 6952, 6952}, - {6470, 6957, 6957}, - {5562, 6961, 6961}, - {6268, 6965, 6967}, - {6389, 6971, 6971}, - {6596, 6975, 6976}, - {6553, 6980, 6981}, - {6576, 6985, 6989}, - {1375, 6993, 6993}, - {652, 6998, 6998}, - {4876, 7002, 7003}, - {5768, 7011, 7013}, - {3973, 7017, 7017}, - {6802, 7025, 7025}, - {6955, 7034, 7036}, - {6974, 7040, 7040}, - {5944, 7044, 7044}, - {6992, 7048, 7054}, - {6872, 7059, 7059}, - {2943, 7063, 7063}, - {6923, 7067, 7067}, - {5094, 7071, 7071}, - {4873, 7075, 7075}, - {5819, 7079, 7079}, - {5945, 7085, 7085}, - {1540, 7090, 7091}, - {2090, 7095, 7095}, - {5024, 7104, 7105}, - {6900, 7109, 7109}, - {6024, 7113, 7114}, - {6000, 7118, 7120}, - {2187, 7124, 7125}, - {6760, 7129, 7130}, - {5898, 7134, 7136}, - {7032, 7144, 7144}, - {4271, 7148, 7148}, - {3706, 7152, 7152}, - {6970, 7156, 7157}, - {7088, 7161, 7163}, - {2718, 7168, 7169}, - {5674, 7175, 7175}, - {4631, 7182, 7182}, - {7070, 7188, 7189}, - {6220, 7196, 7196}, - {3458, 7201, 7202}, - {2041, 7211, 7212}, - {1454, 7216, 7216}, - {5199, 7225, 7227}, - {3529, 7234, 7234}, - {6890, 7238, 7238}, - {3815, 7242, 7243}, - {5490, 7250, 7253}, - {6554, 7257, 7263}, - {5890, 7267, 7269}, - {6877, 7273, 7273}, - {4877, 7277, 7277}, - {2502, 7285, 7285}, - {1483, 7289, 7295}, - {7210, 7304, 7308}, - {6845, 7313, 7316}, - {7219, 7320, 7320}, - {7001, 7325, 7329}, - {6853, 7333, 7334}, - {6120, 7338, 7338}, - {6606, 7342, 7343}, - {7020, 7348, 7350}, - {3509, 7354, 7354}, - {7133, 7359, 7363}, - {3434, 7371, 7374}, - {2787, 7384, 7384}, - {7044, 7388, 7388}, - {6960, 7394, 7395}, - {6676, 7399, 7400}, - {7161, 7404, 7404}, - {7285, 7417, 7418}, - {4558, 7425, 7426}, - {4828, 7430, 7430}, - {6063, 7436, 7436}, - {3597, 7442, 7442}, - {914, 7446, 7446}, - {7320, 7452, 7454}, - {7267, 7458, 7460}, - {5076, 7464, 7464}, - {7430, 7468, 7469}, - {6273, 7473, 7474}, - {7440, 7478, 7487}, - {7348, 7491, 7494}, - {1021, 7510, 7510}, - {7473, 7515, 7515}, - {2823, 7519, 7519}, - {6264, 7527, 
7527}, - {7302, 7531, 7531}, - {7089, 7535, 7535}, - {7342, 7540, 7541}, - {3688, 7547, 7551}, - {3054, 7558, 7560}, - {4177, 7566, 7567}, - {6691, 7574, 7575}, - {7156, 7585, 7586}, - {7147, 7590, 7592}, - {7407, 7598, 7598}, - {7403, 7602, 7603}, - {6868, 7607, 7607}, - {6636, 7611, 7611}, - {4805, 7617, 7617}, - {5779, 7623, 7623}, - {7063, 7627, 7627}, - {5079, 7632, 7632}, - {7377, 7637, 7637}, - {7337, 7641, 7642}, - {6738, 7655, 7655}, - {7338, 7659, 7659}, - {6541, 7669, 7671}, - {595, 7675, 7675}, - {7658, 7679, 7680}, - {7647, 7685, 7686}, - {2477, 7690, 7690}, - {5823, 7694, 7694}, - {4156, 7699, 7699}, - {5931, 7703, 7706}, - {6854, 7712, 7712}, - {4931, 7718, 7718}, - {6979, 7722, 7722}, - {5085, 7727, 7727}, - {6965, 7732, 7732}, - {7201, 7736, 7737}, - {3639, 7741, 7743}, - {7534, 7749, 7749}, - {4292, 7753, 7753}, - {3427, 7759, 7763}, - {7273, 7767, 7767}, - {940, 7778, 7778}, - {4838, 7782, 7785}, - {4216, 7790, 7792}, - {922, 7800, 7801}, - {7256, 7810, 7811}, - {7789, 7815, 7819}, - {7225, 7823, 7825}, - {7531, 7829, 7829}, - {6997, 7833, 7833}, - {7757, 7837, 7838}, - {4129, 7842, 7842}, - {7333, 7848, 7849}, - {6776, 7855, 7855}, - {7527, 7859, 7859}, - {4370, 7863, 7863}, - {4512, 7868, 7868}, - {5679, 7880, 7880}, - {3162, 7884, 7885}, - {3933, 7892, 7894}, - {7804, 7899, 7902}, - {6363, 7906, 7907}, - {7848, 7911, 7912}, - {5584, 7917, 7921}, - {874, 7926, 7926}, - {3342, 7930, 7930}, - {4507, 7935, 7937}, - {3672, 7943, 7944}, - {7911, 7948, 7949}, - {6402, 7956, 7956}, - {7940, 7960, 7960}, - {7113, 7964, 7964}, - {1073, 7968, 7968}, - {7740, 7974, 7974}, - {7601, 7978, 7982}, - {6797, 7987, 7988}, - {3528, 7994, 7995}, - {5483, 7999, 7999}, - {5717, 8011, 8011}, - {5480, 8017, 8017}, - {7770, 8023, 8030}, - {2452, 8034, 8034}, - {5282, 8047, 8047}, - {7967, 8051, 8051}, - {1128, 8058, 8066}, - {6348, 8070, 8070}, - {8055, 8077, 8077}, - {7925, 8081, 8086}, - {6810, 8090, 8090}, - {5051, 8101, 8101}, - {4696, 8109, 8110}, - {5129, 8119, 8119}, - {4449, 8123, 8123}, - {7222, 8127, 8127}, - {4649, 8131, 8134}, - {7994, 8138, 8138}, - {5954, 8148, 8148}, - {475, 8152, 8153}, - {7906, 8157, 8157}, - {7458, 8164, 8166}, - {7632, 8171, 8173}, - {3874, 8177, 8183}, - {4391, 8187, 8187}, - {561, 8191, 8191}, - {2417, 8195, 8195}, - {2357, 8204, 8204}, - {2269, 8216, 8218}, - {3968, 8222, 8222}, - {2200, 8226, 8227}, - {3453, 8247, 8247}, - {2439, 8251, 8252}, - {7175, 8257, 8257}, - {976, 8262, 8264}, - {4953, 8273, 8273}, - {4219, 8278, 8278}, - {6, 8285, 8291}, - {5703, 8295, 8296}, - {5272, 8300, 8300}, - {8037, 8304, 8304}, - {8186, 8314, 8314}, - {8304, 8318, 8318}, - {8051, 8326, 8326}, - {8318, 8330, 8330}, - {2671, 8334, 8335}, - {2662, 8339, 8339}, - {8081, 8349, 8350}, - {3328, 8356, 8356}, - {2879, 8360, 8362}, - {8050, 8370, 8371}, - {8330, 8375, 8376}, - {8375, 8386, 8386}, - {4961, 8390, 8390}, - {1017, 8403, 8405}, - {3533, 8416, 8416}, - {4555, 8422, 8422}, - {6445, 8426, 8426}, - {8169, 8432, 8432}, - {990, 8436, 8436}, - {4102, 8440, 8440}, - {7398, 8444, 8446}, - {3480, 8450, 8450}, - {6324, 8462, 8462}, - {7948, 8466, 8467}, - {5950, 8471, 8471}, - {5189, 8476, 8476}, - {4026, 8490, 8490}, - {8374, 8494, 8495}, - {4682, 8501, 8501}, - {7387, 8506, 8506}, - {8164, 8510, 8515}, - {4079, 8524, 8524}, - {8360, 8529, 8531}, - {7446, 8540, 8543}, - {7971, 8547, 8548}, - {4311, 8552, 8552}, - {5204, 8556, 8557}, - {7968, 8562, 8562}, - {7847, 8571, 8573}, - {8547, 8577, 8577}, - {5320, 8581, 8581}, - {8556, 8585, 8586}, - {8504, 8590, 8590}, - {7669, 8602, 
8604}, - {5874, 8608, 8609}, - {5828, 8613, 8613}, - {7998, 8617, 8617}, - {8519, 8625, 8625}, - {7250, 8637, 8637}, - {426, 8641, 8641}, - {8436, 8645, 8645}, - {5986, 8649, 8656}, - {8157, 8660, 8660}, - {7182, 8665, 8665}, - {8421, 8675, 8675}, - {8509, 8681, 8681}, - {5137, 8688, 8689}, - {8625, 8694, 8695}, - {5228, 8701, 8702}, - {6661, 8714, 8714}, - {1010, 8719, 8719}, - {6648, 8723, 8723}, - {3500, 8728, 8728}, - {2442, 8735, 8735}, - {8494, 8740, 8741}, - {8171, 8753, 8755}, - {7242, 8763, 8764}, - {4739, 8768, 8769}, - {7079, 8773, 8773}, - {8386, 8777, 8777}, - {8624, 8781, 8787}, - {661, 8791, 8794}, - {8631, 8801, 8801}, - {7753, 8805, 8805}, - {4783, 8809, 8810}, - {1673, 8814, 8815}, - {6623, 8819, 8819}, - {4404, 8823, 8823}, - {8089, 8827, 8828}, - {8773, 8832, 8832}, - {5394, 8836, 8836}, - {6231, 8841, 8843}, - {1015, 8852, 8853}, - {6873, 8857, 8857}, - {6289, 8865, 8865}, - {8577, 8869, 8869}, - {8114, 8873, 8875}, - {8534, 8883, 8883}, - {3007, 8887, 8888}, - {8827, 8892, 8893}, - {4788, 8897, 8900}, - {5698, 8906, 8907}, - {7690, 8911, 8911}, - {6643, 8919, 8919}, - {7206, 8923, 8924}, - {7866, 8929, 8931}, - {8880, 8942, 8942}, - {8630, 8951, 8952}, - {6027, 8958, 8958}, - {7749, 8966, 8967}, - {4932, 8972, 8973}, - {8892, 8980, 8981}, - {634, 9003, 9003}, - {8109, 9007, 9008}, - {8777, 9012, 9012}, - {3981, 9016, 9017}, - {5723, 9025, 9025}, - {7662, 9034, 9038}, - {8955, 9042, 9042}, - {8070, 9060, 9062}, - {8910, 9066, 9066}, - {5363, 9070, 9071}, - {7699, 9075, 9076}, - {8991, 9081, 9081}, - {6850, 9085, 9085}, - {5811, 9092, 9094}, - {9079, 9098, 9102}, - {6456, 9106, 9106}, - {2259, 9111, 9111}, - {4752, 9116, 9116}, - {9060, 9120, 9123}, - {8090, 9127, 9127}, - {5305, 9131, 9132}, - {8623, 9137, 9137}, - {7417, 9141, 9141}, - {6564, 9148, 9149}, - {9126, 9157, 9158}, - {4285, 9169, 9170}, - {8698, 9174, 9174}, - {8869, 9178, 9178}, - {2572, 9182, 9183}, - {6482, 9188, 9190}, - {9181, 9201, 9201}, - {2968, 9208, 9209}, - {2506, 9213, 9215}, - {9127, 9219, 9219}, - {7910, 9225, 9227}, - {5422, 9235, 9239}, - {8813, 9244, 9246}, - {9178, 9250, 9250}, - {8748, 9255, 9255}, - {7354, 9265, 9265}, - {7767, 9269, 9269}, - {7710, 9281, 9283}, - {8826, 9288, 9290}, - {861, 9295, 9295}, - {4482, 9301, 9301}, - {9264, 9305, 9306}, - {8805, 9310, 9310}, - {4995, 9314, 9314}, - {6730, 9318, 9318}, - {7457, 9328, 9328}, - {2547, 9335, 9336}, - {6298, 9340, 9343}, - {9305, 9353, 9354}, - {9269, 9358, 9358}, - {6338, 9370, 9370}, - {7289, 9376, 9379}, - {5780, 9383, 9383}, - {7607, 9387, 9387}, - {2065, 9392, 9392}, - {7238, 9396, 9396}, - {8856, 9400, 9400}, - {8069, 9412, 9413}, - {611, 9420, 9420}, - {7071, 9424, 9424}, - {3089, 9430, 9431}, - {7117, 9435, 9438}, - {1976, 9445, 9445}, - {6640, 9449, 9449}, - {5488, 9453, 9453}, - {8739, 9457, 9459}, - {5958, 9466, 9466}, - {7985, 9470, 9470}, - {8735, 9475, 9475}, - {5009, 9479, 9479}, - {8073, 9483, 9484}, - {2328, 9490, 9491}, - {9250, 9495, 9495}, - {4043, 9502, 9502}, - {7712, 9506, 9506}, - {9012, 9510, 9510}, - {9028, 9514, 9515}, - {2190, 9521, 9524}, - {9029, 9528, 9528}, - {9519, 9532, 9532}, - {9495, 9536, 9536}, - {8527, 9540, 9540}, - {2137, 9550, 9550}, - {8419, 9557, 9557}, - {9383, 9561, 9562}, - {8970, 9575, 9578}, - {8911, 9582, 9582}, - {7828, 9595, 9596}, - {6180, 9600, 9600}, - {8738, 9604, 9607}, - {7540, 9611, 9612}, - {9599, 9616, 9618}, - {9187, 9623, 9623}, - {9294, 9628, 9629}, - {4536, 9639, 9639}, - {3867, 9643, 9643}, - {6305, 9648, 9648}, - {1617, 9654, 9657}, - {5762, 9666, 9666}, - {8314, 
9670, 9670}, - {9666, 9674, 9675}, - {9506, 9679, 9679}, - {9669, 9685, 9686}, - {9683, 9690, 9690}, - {8763, 9697, 9698}, - {7468, 9702, 9702}, - {460, 9707, 9707}, - {3115, 9712, 9712}, - {9424, 9716, 9717}, - {7359, 9721, 9724}, - {7547, 9728, 9729}, - {7151, 9733, 9738}, - {7627, 9742, 9742}, - {2822, 9747, 9747}, - {8247, 9751, 9753}, - {9550, 9758, 9758}, - {7585, 9762, 9763}, - {1002, 9767, 9767}, - {7168, 9772, 9773}, - {6941, 9777, 9780}, - {9728, 9784, 9786}, - {9770, 9792, 9796}, - {6411, 9801, 9802}, - {3689, 9806, 9808}, - {9575, 9814, 9816}, - {7025, 9820, 9821}, - {2776, 9826, 9826}, - {9806, 9830, 9830}, - {9820, 9834, 9835}, - {9800, 9839, 9847}, - {9834, 9851, 9852}, - {9829, 9856, 9862}, - {1400, 9866, 9866}, - {3197, 9870, 9871}, - {9851, 9875, 9876}, - {9742, 9883, 9884}, - {3362, 9888, 9889}, - {9883, 9893, 9893}, - {5711, 9899, 9910}, - {7806, 9915, 9915}, - {9120, 9919, 9919}, - {9715, 9925, 9934}, - {2580, 9938, 9938}, - {4907, 9942, 9944}, - {6239, 9953, 9954}, - {6961, 9963, 9963}, - {5295, 9967, 9968}, - {1915, 9972, 9973}, - {3426, 9983, 9985}, - {9875, 9994, 9995}, - {6942, 9999, 9999}, - {6621, 10005, 10005}, - {7589, 10010, 10012}, - {9286, 10020, 10020}, - {838, 10024, 10024}, - {9980, 10028, 10031}, - {9994, 10035, 10041}, - {2702, 10048, 10051}, - {2621, 10059, 10059}, - {10054, 10065, 10065}, - {8612, 10073, 10074}, - {7033, 10078, 10078}, - {916, 10082, 10082}, - {10035, 10086, 10087}, - {8613, 10097, 10097}, - {9919, 10107, 10108}, - {6133, 10114, 10115}, - {10059, 10119, 10119}, - {10065, 10126, 10127}, - {7732, 10131, 10131}, - {7155, 10135, 10136}, - {6728, 10140, 10140}, - {6162, 10144, 10145}, - {4724, 10150, 10150}, - {1665, 10154, 10154}, - {10126, 10163, 10163}, - {9783, 10168, 10168}, - {1715, 10172, 10173}, - {7152, 10177, 10182}, - {8760, 10187, 10187}, - {7829, 10191, 10191}, - {9679, 10196, 10196}, - {9369, 10201, 10201}, - {2928, 10206, 10208}, - {6951, 10214, 10217}, - {5633, 10221, 10221}, - {7199, 10225, 10225}, - {10118, 10230, 10231}, - {9999, 10235, 10236}, - {10045, 10240, 10249}, - {5565, 10256, 10256}, - {9866, 10261, 10261}, - {10163, 10268, 10268}, - {9869, 10272, 10272}, - {9789, 10276, 10283}, - {10235, 10287, 10288}, - {10214, 10298, 10299}, - {6971, 10303, 10303}, - {3346, 10307, 10307}, - {10185, 10311, 10312}, - {9993, 10318, 10320}, - {2779, 10332, 10334}, - {1726, 10338, 10338}, - {741, 10354, 10360}, - {10230, 10372, 10373}, - {10260, 10384, 10385}, - {10131, 10389, 10398}, - {6946, 10406, 10409}, - {10158, 10413, 10420}, - {10123, 10424, 10424}, - {6157, 10428, 10429}, - {4518, 10434, 10434}, - {9893, 10438, 10438}, - {9865, 10442, 10446}, - {7558, 10454, 10454}, - {10434, 10460, 10460}, - {10064, 10466, 10468}, - {2703, 10472, 10474}, - {9751, 10478, 10479}, - {6714, 10485, 10485}, - {8020, 10490, 10490}, - {10303, 10494, 10494}, - {3521, 10499, 10500}, - {9281, 10513, 10515}, - {6028, 10519, 10523}, - {9387, 10527, 10527}, - {7614, 10531, 10531}, - {3611, 10536, 10536}, - {9162, 10540, 10540}, - {10081, 10546, 10547}, - {10034, 10560, 10562}, - {6726, 10567, 10571}, - {8237, 10575, 10575}, - {10438, 10579, 10583}, - {10140, 10587, 10587}, - {5784, 10592, 10592}, - {9819, 10597, 10600}, - {10567, 10604, 10608}, - {9335, 10613, 10613}, - {8300, 10617, 10617}, - {10575, 10621, 10621}, - {9678, 10625, 10626}, - {9962, 10632, 10633}, - {10535, 10637, 10638}, - {8199, 10642, 10642}, - {10372, 10647, 10648}, - {10637, 10656, 10657}, - {10579, 10667, 10668}, - {10465, 10677, 10680}, - {6702, 10684, 10685}, - {10073, 10691, 
10692}, - {4505, 10696, 10697}, - {9042, 10701, 10701}, - {6460, 10705, 10706}, - {10010, 10714, 10716}, - {10656, 10720, 10722}, - {7282, 10727, 10729}, - {2327, 10733, 10733}, - {2491, 10740, 10741}, - {10704, 10748, 10750}, - {6465, 10754, 10754}, - {10647, 10758, 10759}, - {10424, 10763, 10763}, - {10748, 10776, 10776}, - {10546, 10780, 10781}, - {10758, 10785, 10786}, - {10287, 10790, 10797}, - {10785, 10801, 10807}, - {10240, 10811, 10826}, - {9509, 10830, 10830}, - {2579, 10836, 10838}, - {9801, 10843, 10845}, - {7555, 10849, 10850}, - {10776, 10860, 10865}, - {8023, 10869, 10869}, - {10046, 10876, 10884}, - {10253, 10888, 10892}, - {9941, 10897, 10897}, - {7898, 10901, 10905}, - {6725, 10909, 10913}, - {10757, 10921, 10923}, - {10160, 10931, 10931}, - {10916, 10935, 10942}, - {10261, 10946, 10946}, - {10318, 10952, 10954}, - {5911, 10959, 10961}, - {10801, 10965, 10966}, - {10946, 10970, 10977}, - {10592, 10982, 10984}, - {9913, 10988, 10990}, - {8510, 10994, 10996}, - {9419, 11000, 11001}, - {6765, 11006, 11007}, - {10725, 11011, 11011}, - {5537, 11017, 11019}, - {9208, 11024, 11025}, - {5850, 11030, 11030}, - {9610, 11034, 11036}, - {8846, 11041, 11047}, - {9697, 11051, 11051}, - {1622, 11055, 11058}, - {2370, 11062, 11062}, - {8393, 11067, 11067}, - {9756, 11071, 11071}, - {10172, 11076, 11076}, - {27, 11081, 11081}, - {7357, 11087, 11092}, - {8151, 11104, 11106}, - {6115, 11110, 11110}, - {10667, 11114, 11115}, - {11099, 11121, 11123}, - {10705, 11127, 11127}, - {8938, 11131, 11131}, - {11114, 11135, 11136}, - {1390, 11140, 11141}, - {10964, 11146, 11148}, - {11140, 11152, 11155}, - {9813, 11159, 11166}, - {624, 11171, 11172}, - {3118, 11177, 11179}, - {11029, 11184, 11186}, - {10186, 11190, 11190}, - {10306, 11196, 11196}, - {8665, 11201, 11201}, - {7382, 11205, 11205}, - {1100, 11210, 11210}, - {2337, 11216, 11217}, - {1609, 11221, 11223}, - {5763, 11228, 11229}, - {5220, 11233, 11233}, - {11061, 11241, 11241}, - {10617, 11246, 11246}, - {11190, 11250, 11251}, - {10144, 11255, 11256}, - {11232, 11260, 11260}, - {857, 11264, 11265}, - {10994, 11269, 11271}, - {3879, 11280, 11281}, - {11184, 11287, 11289}, - {9611, 11293, 11295}, - {11250, 11299, 11299}, - {4495, 11304, 11304}, - {7574, 11308, 11309}, - {9814, 11315, 11317}, - {1713, 11321, 11324}, - {1905, 11328, 11328}, - {8745, 11335, 11340}, - {8883, 11351, 11351}, - {8119, 11358, 11358}, - {1842, 11363, 11364}, - {11237, 11368, 11368}, - {8814, 11373, 11374}, - {5684, 11378, 11378}, - {11011, 11382, 11382}, - {6520, 11389, 11389}, - {11183, 11393, 11396}, - {1790, 11404, 11404}, - {9536, 11408, 11408}, - {11298, 11418, 11419}, - {3929, 11425, 11425}, - {5588, 11429, 11429}, - {8476, 11436, 11436}, - {4096, 11440, 11442}, - {11084, 11446, 11454}, - {10603, 11458, 11463}, - {7332, 11472, 11474}, - {7611, 11483, 11486}, - {4836, 11490, 11491}, - {10024, 11495, 11495}, - {4917, 11501, 11506}, - {6486, 11510, 11512}, - {11269, 11516, 11518}, - {3603, 11522, 11525}, - {11126, 11535, 11535}, - {11418, 11539, 11541}, - {11408, 11545, 11545}, - {9021, 11549, 11552}, - {6745, 11557, 11557}, - {5118, 11561, 11564}, - {7590, 11568, 11569}, - {4426, 11573, 11578}, - {9790, 11582, 11583}, - {6447, 11587, 11587}, - {10229, 11591, 11594}, - {10457, 11598, 11598}, - {10168, 11604, 11604}, - {10543, 11608, 11608}, - {7404, 11612, 11612}, - {11127, 11616, 11616}, - {3337, 11620, 11620}, - {11501, 11624, 11628}, - {4543, 11633, 11635}, - {8449, 11642, 11642}, - {4943, 11646, 11648}, - {10526, 11652, 11654}, - {11620, 11659, 11659}, - {8927, 
11664, 11669}, - {532, 11673, 11673}, - {10513, 11677, 11679}, - {10428, 11683, 11683}, - {10999, 11689, 11690}, - {9469, 11695, 11695}, - {3606, 11699, 11699}, - {9560, 11708, 11709}, - {1564, 11714, 11714}, - {10527, 11718, 11718}, - {3071, 11723, 11726}, - {11590, 11731, 11732}, - {6605, 11737, 11737}, - {11624, 11741, 11745}, - {7822, 11749, 11752}, - {5269, 11757, 11758}, - {1339, 11767, 11767}, - {1363, 11771, 11773}, - {3704, 11777, 11777}, - {10952, 11781, 11783}, - {6764, 11793, 11795}, - {8675, 11800, 11800}, - {9963, 11804, 11804}, - {11573, 11808, 11809}, - {9548, 11813, 11813}, - {11591, 11817, 11818}, - {11446, 11822, 11822}, - {9224, 11828, 11828}, - {3158, 11836, 11836}, - {10830, 11840, 11840}, - {7234, 11846, 11846}, - {11299, 11850, 11850}, - {11544, 11854, 11855}, - {11498, 11859, 11859}, - {10993, 11865, 11868}, - {9720, 11872, 11878}, - {10489, 11882, 11890}, - {11712, 11898, 11904}, - {11516, 11908, 11910}, - {11568, 11914, 11915}, - {10177, 11919, 11924}, - {11363, 11928, 11929}, - {10494, 11933, 11933}, - {9870, 11937, 11938}, - {9427, 11942, 11942}, - {11481, 11949, 11949}, - {6030, 11955, 11957}, - {11718, 11961, 11961}, - {10531, 11965, 11983}, - {5126, 11987, 11987}, - {7515, 11991, 11991}, - {10646, 11996, 11997}, - {2947, 12001, 12001}, - {9582, 12009, 12010}, - {6202, 12017, 12018}, - {11714, 12022, 12022}, - {9235, 12033, 12037}, - {9721, 12041, 12044}, - {11932, 12051, 12052}, - {12040, 12056, 12056}, - {12051, 12060, 12060}, - {11601, 12066, 12066}, - {8426, 12070, 12070}, - {4053, 12077, 12077}, - {4262, 12081, 12081}, - {9761, 12086, 12088}, - {11582, 12092, 12093}, - {10965, 12097, 12098}, - {11803, 12103, 12104}, - {11933, 12108, 12109}, - {10688, 12117, 12117}, - {12107, 12125, 12126}, - {6774, 12130, 12132}, - {6286, 12137, 12137}, - {9543, 12141, 12141}, - {12097, 12145, 12146}, - {10790, 12150, 12150}, - {10125, 12154, 12156}, - {12125, 12164, 12164}, - {12064, 12168, 12172}, - {10811, 12178, 12188}, - {12092, 12192, 12193}, - {10058, 12197, 12198}, - {11611, 12211, 12212}, - {3459, 12216, 12216}, - {10291, 12225, 12228}, - {12191, 12232, 12234}, - {12145, 12238, 12238}, - {12001, 12242, 12250}, - {3840, 12255, 12255}, - {12216, 12259, 12259}, - {674, 12272, 12272}, - {12141, 12276, 12276}, - {10766, 12280, 12280}, - {11545, 12284, 12284}, - {6496, 12290, 12290}, - {11381, 12294, 12295}, - {603, 12302, 12303}, - {12276, 12308, 12308}, - {11850, 12313, 12314}, - {565, 12319, 12319}, - {9351, 12324, 12324}, - {11822, 12328, 12328}, - {2691, 12333, 12334}, - {11840, 12338, 12338}, - {11070, 12343, 12343}, - {9510, 12347, 12347}, - {11024, 12352, 12353}, - {7173, 12359, 12359}, - {517, 12363, 12363}, - {6311, 12367, 12368}, - {11367, 12372, 12373}, - {12008, 12377, 12377}, - {11372, 12382, 12384}, - {11358, 12391, 12392}, - {11382, 12396, 12396}, - {6882, 12400, 12401}, - {11246, 12405, 12405}, - {8359, 12409, 12412}, - {10154, 12418, 12418}, - {12016, 12425, 12426}, - {8972, 12434, 12435}, - {10478, 12439, 12440}, - {12395, 12449, 12449}, - {11612, 12454, 12454}, - {12347, 12458, 12458}, - {10700, 12466, 12467}, - {3637, 12471, 12476}, - {1042, 12480, 12481}, - {6747, 12488, 12488}, - {12396, 12492, 12493}, - {9420, 12497, 12497}, - {11285, 12501, 12510}, - {4470, 12515, 12515}, - {9374, 12519, 12519}, - {11293, 12528, 12528}, - {2058, 12534, 12535}, - {6521, 12539, 12539}, - {12492, 12543, 12543}, - {3043, 12547, 12547}, - {2982, 12551, 12553}, - {11030, 12557, 12563}, - {7636, 12568, 12568}, - {9639, 12572, 12572}, - {12543, 12576, 12576}, - {5989, 
12580, 12583}, - {11051, 12587, 12587}, - {1061, 12592, 12594}, - {12313, 12599, 12601}, - {11846, 12605, 12605}, - {12576, 12609, 12609}, - {11040, 12618, 12625}, - {12479, 12629, 12629}, - {6903, 12633, 12633}, - {12322, 12639, 12639}, - {12253, 12643, 12645}, - {5594, 12651, 12651}, - {12522, 12655, 12655}, - {11703, 12659, 12659}, - {1377, 12665, 12665}, - {8022, 12669, 12669}, - {12280, 12674, 12674}, - {9023, 12680, 12681}, - {12328, 12685, 12685}, - {3085, 12689, 12693}, - {4700, 12698, 12698}, - {10224, 12702, 12702}, - {8781, 12706, 12706}, - {1651, 12710, 12710}, - {12458, 12714, 12714}, - {12005, 12718, 12721}, - {11908, 12725, 12726}, - {8202, 12733, 12733}, - {11708, 12739, 12740}, - {12599, 12744, 12745}, - {12284, 12749, 12749}, - {5285, 12756, 12756}, - {12055, 12775, 12777}, - {6919, 12782, 12782}, - {12242, 12786, 12786}, - {12009, 12790, 12790}, - {9628, 12794, 12796}, - {11354, 12801, 12802}, - {10225, 12806, 12807}, - {579, 12813, 12813}, - {8935, 12817, 12822}, - {8753, 12827, 12829}, - {11006, 12835, 12835}, - {858, 12841, 12845}, - {476, 12849, 12849}, - {7667, 12854, 12854}, - {12760, 12860, 12871}, - {11677, 12875, 12877}, - {12714, 12881, 12881}, - {12731, 12885, 12890}, - {7108, 12894, 12896}, - {1165, 12900, 12900}, - {4021, 12906, 12906}, - {10829, 12910, 12911}, - {12331, 12915, 12915}, - {8887, 12919, 12921}, - {11639, 12925, 12925}, - {7964, 12929, 12929}, - {12528, 12937, 12937}, - {8148, 12941, 12941}, - {12770, 12948, 12950}, - {12609, 12954, 12954}, - {12685, 12958, 12958}, - {2803, 12962, 12962}, - {9561, 12966, 12966}, - {6671, 12972, 12973}, - {12056, 12977, 12977}, - {6380, 12981, 12981}, - {12048, 12985, 12985}, - {11961, 12989, 12993}, - {3368, 12997, 12999}, - {6634, 13004, 13004}, - {6775, 13009, 13010}, - {12136, 13014, 13019}, - {10341, 13023, 13023}, - {13002, 13027, 13027}, - {10587, 13031, 13031}, - {10307, 13035, 13035}, - {12736, 13039, 13039}, - {12744, 13043, 13044}, - {6175, 13048, 13048}, - {9702, 13053, 13054}, - {662, 13059, 13061}, - {12718, 13065, 13068}, - {12893, 13072, 13075}, - {8299, 13086, 13091}, - {12604, 13095, 13096}, - {12848, 13100, 13101}, - {12749, 13105, 13105}, - {12526, 13109, 13114}, - {9173, 13122, 13122}, - {12769, 13128, 13128}, - {13038, 13132, 13132}, - {12725, 13136, 13137}, - {12639, 13146, 13146}, - {9711, 13150, 13151}, - {12137, 13155, 13155}, - {13039, 13159, 13159}, - {4681, 13163, 13164}, - {12954, 13168, 13168}, - {13158, 13175, 13176}, - {13105, 13180, 13180}, - {10754, 13184, 13184}, - {13167, 13188, 13188}, - {12658, 13192, 13192}, - {4294, 13199, 13200}, - {11682, 13204, 13205}, - {11695, 13209, 13209}, - {11076, 13214, 13214}, - {12232, 13218, 13218}, - {9399, 13223, 13224}, - {12880, 13228, 13229}, - {13048, 13234, 13234}, - {9701, 13238, 13239}, - {13209, 13243, 13243}, - {3658, 13248, 13248}, - {3698, 13252, 13254}, - {12237, 13260, 13260}, - {8872, 13266, 13266}, - {12957, 13272, 13273}, - {1393, 13281, 13281}, - {2013, 13285, 13288}, - {4244, 13296, 13299}, - {9428, 13303, 13303}, - {12702, 13307, 13307}, - {13078, 13311, 13311}, - {6071, 13315, 13315}, - {3061, 13319, 13319}, - {2051, 13324, 13324}, - {11560, 13328, 13331}, - {6584, 13336, 13336}, - {8482, 13340, 13340}, - {5331, 13344, 13344}, - {4171, 13348, 13348}, - {8501, 13352, 13352}, - {9219, 13356, 13356}, - {9473, 13360, 13363}, - {12881, 13367, 13367}, - {13065, 13371, 13375}, - {2979, 13379, 13384}, - {1518, 13388, 13388}, - {11177, 13392, 13392}, - {9457, 13398, 13398}, - {12293, 13407, 13410}, - {3697, 13414, 13417}, - 
{10338, 13425, 13425}, - {13367, 13429, 13429}, - {11074, 13433, 13437}, - {4201, 13441, 13443}, - {1812, 13447, 13448}, - {13360, 13452, 13456}, - {13188, 13463, 13463}, - {9732, 13470, 13470}, - {11332, 13477, 13477}, - {9918, 13487, 13487}, - {6337, 13497, 13497}, - {13429, 13501, 13501}, - {11413, 13505, 13505}, - {4685, 13512, 13513}, - {13136, 13517, 13519}, - {7416, 13528, 13530}, - {12929, 13534, 13534}, - {11110, 13539, 13539}, - {11521, 13543, 13543}, - {12825, 13553, 13553}, - {13447, 13557, 13558}, - {12299, 13562, 13563}, - {9003, 13570, 13570}, - {12500, 13577, 13577}, - {13501, 13581, 13581}, - {9392, 13586, 13586}, - {12454, 13590, 13590}, - {6189, 13595, 13595}, - {13053, 13599, 13599}, - {11881, 13604, 13604}, - {13159, 13608, 13608}, - {4894, 13612, 13612}, - {13221, 13621, 13621}, - {8950, 13625, 13625}, - {13533, 13629, 13629}, - {9633, 13633, 13633}, - {7892, 13637, 13639}, - {13581, 13643, 13643}, - {13616, 13647, 13649}, - {12794, 13653, 13654}, - {8919, 13659, 13659}, - {9674, 13663, 13663}, - {13577, 13668, 13668}, - {12966, 13672, 13672}, - {12659, 13676, 13683}, - {6124, 13688, 13688}, - {9225, 13693, 13695}, - {11833, 13702, 13702}, - {12904, 13709, 13717}, - {13647, 13721, 13722}, - {11687, 13726, 13727}, - {12434, 13731, 13732}, - {12689, 13736, 13742}, - {13168, 13746, 13746}, - {6151, 13751, 13752}, - {11821, 13756, 13757}, - {6467, 13764, 13764}, - {5730, 13769, 13769}, - {5136, 13780, 13780}, - {724, 13784, 13785}, - {13517, 13789, 13791}, - {640, 13795, 13796}, - {7721, 13800, 13802}, - {11121, 13806, 13807}, - {5791, 13811, 13815}, - {12894, 13819, 13819}, - {11100, 13824, 13824}, - {7011, 13830, 13830}, - {7129, 13834, 13837}, - {13833, 13841, 13841}, - {11276, 13847, 13847}, - {13621, 13853, 13853}, - {13589, 13862, 13863}, - {12989, 13867, 13867}, - {12789, 13871, 13871}, - {1239, 13875, 13875}, - {4675, 13879, 13881}, - {4686, 13885, 13885}, - {707, 13889, 13889}, - {5449, 13897, 13898}, - {13867, 13902, 13903}, - {10613, 13908, 13908}, - {13789, 13912, 13914}, - {4451, 13918, 13919}, - {9200, 13924, 13924}, - {2011, 13930, 13930}, - {11433, 13934, 13936}, - {4695, 13942, 13943}, - {9435, 13948, 13951}, - {13688, 13955, 13957}, - {11694, 13961, 13962}, - {5712, 13966, 13966}, - {5991, 13970, 13972}, - {13477, 13976, 13976}, - {10213, 13987, 13987}, - {11839, 13991, 13993}, - {12272, 13997, 13997}, - {6206, 14001, 14001}, - {13179, 14006, 14007}, - {2939, 14011, 14011}, - {12972, 14016, 14017}, - {13918, 14021, 14022}, - {7436, 14026, 14027}, - {7678, 14032, 14034}, - {13586, 14040, 14040}, - {13347, 14044, 14044}, - {13109, 14048, 14051}, - {9244, 14055, 14057}, - {13315, 14061, 14061}, - {13276, 14067, 14067}, - {11435, 14073, 14074}, - {13853, 14078, 14078}, - {13452, 14082, 14082}, - {14044, 14087, 14087}, - {4440, 14091, 14095}, - {4479, 14100, 14103}, - {9395, 14107, 14109}, - {6834, 14119, 14119}, - {10458, 14123, 14124}, - {1429, 14129, 14129}, - {8443, 14135, 14135}, - {10365, 14140, 14140}, - {5267, 14145, 14145}, - {11834, 14151, 14153}, -} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go index 0cf5e379..ece692ea 100644 --- a/vendor/github.com/golang/snappy/snappy.go +++ b/vendor/github.com/golang/snappy/snappy.go @@ -2,10 +2,21 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. 
+// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. // -// The C++ snappy implementation is at https://github.com/google/snappy +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. package snappy // import "github.com/golang/snappy" import ( diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go deleted file mode 100644 index 2712710d..00000000 --- a/vendor/github.com/golang/snappy/snappy_test.go +++ /dev/null @@ -1,1353 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" -) - -var ( - download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") - benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") -) - -// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by -// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on -// this GOARCH. There is more than one valid encoding of any given input, and -// there is more than one good algorithm along the frontier of trading off -// throughput for output size. Nonetheless, we presume that the C++ encoder's -// algorithm is a good one and has been tested on a wide range of inputs, so -// matching that exactly should mean that the Go encoder's algorithm is also -// good, without needing to gather our own corpus of test data. -// -// The exact algorithm used by the C++ code is potentially endian dependent, as -// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes -// at a time. The Go implementation is endian agnostic, in that its output is -// the same (as little-endian C++ code), regardless of the CPU's endianness. -// -// Thus, when comparing Go's output to C++ output generated beforehand, such as -// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- -// endian system, we can run that test regardless of the runtime.GOARCH value. -// -// When comparing Go's output to dynamically generated C++ output, i.e. the -// result of fork/exec'ing a C++ program, we can run that test only on -// little-endian systems, because the C++ output might be different on -// big-endian systems. The runtime package doesn't export endianness per se, -// but we can restrict this match-C++ test to common little-endian systems. 
-const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" - -func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { - got := maxEncodedLenOfMaxBlockSize - want := MaxEncodedLen(maxBlockSize) - if got != want { - t.Fatalf("got %d, want %d", got, want) - } -} - -func cmp(a, b []byte) error { - if bytes.Equal(a, b) { - return nil - } - if len(a) != len(b) { - return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) - } - for i := range a { - if a[i] != b[i] { - return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) - } - } - return nil -} - -func roundtrip(b, ebuf, dbuf []byte) error { - d, err := Decode(dbuf, Encode(ebuf, b)) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if err := cmp(d, b); err != nil { - return fmt.Errorf("roundtrip mismatch: %v", err) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rng := rand.New(rand.NewSource(1)) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(rng.Intn(256)) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestInvalidVarint(t *testing.T) { - testCases := []struct { - desc string - input string - }{{ - "invalid varint, final byte has continuation bit set", - "\xff", - }, { - "invalid varint, value overflows uint64", - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", - }, { - // https://github.com/google/snappy/blob/master/format_description.txt - // says that "the stream starts with the uncompressed length [as a - // varint] (up to a maximum of 2^32 - 1)". 
- "valid varint (as uint64), but value overflows uint32", - "\x80\x80\x80\x80\x10", - }} - - for _, tc := range testCases { - input := []byte(tc.input) - if _, err := DecodedLen(input); err != ErrCorrupt { - t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) - } - if _, err := Decode(nil, input); err != ErrCorrupt { - t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) - } - } -} - -func TestDecode(t *testing.T) { - lit40Bytes := make([]byte, 40) - for i := range lit40Bytes { - lit40Bytes[i] = byte(i) - } - lit40 := string(lit40Bytes) - - testCases := []struct { - desc string - input string - want string - wantErr error - }{{ - `decodedLen=0; valid input`, - "\x00", - "", - nil, - }, { - `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, - "\x03" + "\x08\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, - "\x02" + "\x08\xff\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, - "\x03" + "\x08\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, - "\x28" + "\x9c" + lit40, - lit40, - nil, - }, { - `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, - "\x01" + "\xf0", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, - "\x03" + "\xf0\x02\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, - "\x01" + "\xf4\x00", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`, - "\x03" + "\xf4\x02\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, - "\x01" + "\xf8\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, - "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, - "\x01" + "\xfc\x00\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, - "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, - "\x04" + "\xfc\x02\x00\x00\x00\xff", - "", - ErrCorrupt, - }, { - `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, - "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", - "\xff\xff\xff", - nil, - }, { - `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, - "\x04" + "\x01", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, - "\x04" + "\x02\x00", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, - "\x04" + "\x03\x00\x00\x00", - "", - ErrCorrupt, - }, { - `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, - "\x04" + "\x0cabcd", - "abcd", - nil, - }, { - `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, - "\x0d" + "\x0cabcd" + "\x15\x04", - "abcdabcdabcda", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, - "\x08" + "\x0cabcd" + "\x01\x04", - "abcdabcd", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, - "\x08" + "\x0cabcd" + "\x01\x02", - "abcdcdcd", - nil, - }, { - 
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`, - "\x08" + "\x0cabcd" + "\x01\x01", - "abcddddd", - nil, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, - "\x08" + "\x0cabcd" + "\x01\x00", - "", - ErrCorrupt, - }, { - `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, - "\x09" + "\x0cabcd" + "\x01\x04", - "", - ErrCorrupt, - }, { - `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, - "\x08" + "\x0cabcd" + "\x01\x05", - "", - ErrCorrupt, - }, { - `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, - "\x07" + "\x0cabcd" + "\x01\x04", - "", - ErrCorrupt, - }, { - `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, - "\x06" + "\x0cabcd" + "\x06\x03\x00", - "abcdbc", - nil, - }, { - `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, - "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", - "abcdbc", - nil, - }} - - const ( - // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are - // not present in either the input or the output. It is written to dBuf - // to check that Decode does not write bytes past the end of - // dBuf[:dLen]. - // - // The magic number 37 was chosen because it is prime. A more 'natural' - // number like 32 might lead to a false negative if, for example, a - // byte was incorrectly copied 4*8 bytes later. - notPresentBase = 0xa0 - notPresentLen = 37 - ) - - var dBuf [100]byte -loop: - for i, tc := range testCases { - input := []byte(tc.input) - for _, x := range input { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) - continue loop - } - } - - dLen, n := binary.Uvarint(input) - if n <= 0 { - t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) - continue - } - if dLen > uint64(len(dBuf)) { - t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) - continue - } - - for j := range dBuf { - dBuf[j] = byte(notPresentBase + j%notPresentLen) - } - g, gotErr := Decode(dBuf[:], input) - if got := string(g); got != tc.want || gotErr != tc.wantErr { - t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", - i, tc.desc, got, gotErr, tc.want, tc.wantErr) - continue - } - for j, x := range dBuf { - if uint64(j) < dLen { - continue - } - if w := byte(notPresentBase + j%notPresentLen); x != w { - t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", - i, tc.desc, j, x, w, dBuf) - continue loop - } - } - } -} - -func TestDecodeCopy4(t *testing.T) { - dots := strings.Repeat(".", 65536) - - input := strings.Join([]string{ - "\x89\x80\x04", // decodedLen = 65545. - "\x0cpqrs", // 4-byte literal "pqrs". - "\xf4\xff\xff" + dots, // 65536-byte literal dots. - "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. - }, "") - - gotBytes, err := Decode(nil, []byte(input)) - if err != nil { - t.Fatal(err) - } - got := string(gotBytes) - want := "pqrs" + dots + "pqrs." - if len(got) != len(want) { - t.Fatalf("got %d bytes, want %d", len(got), len(want)) - } - if got != want { - for i := 0; i < len(got); i++ { - if g, w := got[i], want[i]; g != w { - t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) - } - } - } -} - -// TestDecodeLengthOffset tests decoding an encoding of the form literal + -// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". -func TestDecodeLengthOffset(t *testing.T) { - const ( - prefix = "abcdefghijklmnopqr" - suffix = "ABCDEFGHIJKLMNOPQR" - - // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are - // not present in either the input or the output. It is written to - // gotBuf to check that Decode does not write bytes past the end of - // gotBuf[:totalLen]. - // - // The magic number 37 was chosen because it is prime. A more 'natural' - // number like 32 might lead to a false negative if, for example, a - // byte was incorrectly copied 4*8 bytes later. - notPresentBase = 0xa0 - notPresentLen = 37 - ) - var gotBuf, wantBuf, inputBuf [128]byte - for length := 1; length <= 18; length++ { - for offset := 1; offset <= 18; offset++ { - loop: - for suffixLen := 0; suffixLen <= 18; suffixLen++ { - totalLen := len(prefix) + length + suffixLen - - inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) - inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) - inputLen++ - inputLen += copy(inputBuf[inputLen:], prefix) - inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) - inputBuf[inputLen+1] = byte(offset) - inputBuf[inputLen+2] = 0x00 - inputLen += 3 - if suffixLen > 0 { - inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) - inputLen++ - inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) - } - input := inputBuf[:inputLen] - - for i := range gotBuf { - gotBuf[i] = byte(notPresentBase + i%notPresentLen) - } - got, err := Decode(gotBuf[:], input) - if err != nil { - t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) - continue - } - - wantLen := 0 - wantLen += copy(wantBuf[wantLen:], prefix) - for i := 0; i < length; i++ { - wantBuf[wantLen] = wantBuf[wantLen-offset] - wantLen++ - } - wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) - want := wantBuf[:wantLen] - - for _, x := range input { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", - length, offset, suffixLen, x, input) - continue loop - } - } - for i, x := range gotBuf { - if i < totalLen { - continue - } - if w := byte(notPresentBase + i%notPresentLen); x != w { - t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ - "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", - length, offset, suffixLen, totalLen, i, x, w, gotBuf) - continue loop - } - } - for _, x := range want { - if notPresentBase <= x && x < notPresentBase+notPresentLen { - t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", - length, offset, suffixLen, x, want) - continue loop - } - } - - if !bytes.Equal(got, want) { - t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", - length, offset, suffixLen, input, got, want) - continue - } - } - } - } -} - -const ( - goldenText = "Mark.Twain-Tom.Sawyer.txt" - goldenCompressed = goldenText + ".rawsnappy" -) - -func TestDecodeGoldenInput(t *testing.T) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - got, err := Decode(nil, src) - if err != nil { - t.Fatalf("Decode: %v", err) - } - want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - if err := cmp(got, want); err != nil { - t.Fatal(err) - } -} - -func TestEncodeGoldenInput(t *testing.T) { - tDir := 
filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - got := Encode(nil, src) - want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - if err := cmp(got, want); err != nil { - t.Fatal(err) - } -} - -func TestExtendMatchGoldenInput(t *testing.T) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - t.Fatalf("ReadFile: %v", err) - } - for i, tc := range extendMatchGoldenTestCases { - got := extendMatch(src, tc.i, tc.j) - if got != tc.want { - t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", - i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) - } - } -} - -func TestExtendMatch(t *testing.T) { - // ref is a simple, reference implementation of extendMatch. - ref := func(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j - } - - nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} - for yIndex := 40; yIndex > 30; yIndex-- { - xxx := bytes.Repeat([]byte("x"), 40) - if yIndex < len(xxx) { - xxx[yIndex] = 'y' - } - for _, i := range nums { - for _, j := range nums { - if i >= j { - continue - } - got := extendMatch(xxx, i, j) - want := ref(xxx, i, j) - if got != want { - t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) - } - } - } - } -} - -const snappytoolCmdName = "cmd/snappytool/snappytool" - -func skipTestSameEncodingAsCpp() (msg string) { - if !goEncoderShouldMatchCppEncoder { - return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) - } - if _, err := os.Stat(snappytoolCmdName); err != nil { - return fmt.Sprintf("could not find snappytool: %v", err) - } - return "" -} - -func runTestSameEncodingAsCpp(src []byte) error { - got := Encode(nil, src) - - cmd := exec.Command(snappytoolCmdName, "-e") - cmd.Stdin = bytes.NewReader(src) - want, err := cmd.Output() - if err != nil { - return fmt.Errorf("could not run snappytool: %v", err) - } - return cmp(got, want) -} - -func TestSameEncodingAsCppShortCopies(t *testing.T) { - if msg := skipTestSameEncodingAsCpp(); msg != "" { - t.Skip(msg) - } - src := bytes.Repeat([]byte{'a'}, 20) - for i := 0; i <= len(src); i++ { - if err := runTestSameEncodingAsCpp(src[:i]); err != nil { - t.Errorf("i=%d: %v", i, err) - } - } -} - -func TestSameEncodingAsCppLongFiles(t *testing.T) { - if msg := skipTestSameEncodingAsCpp(); msg != "" { - t.Skip(msg) - } - bDir := filepath.FromSlash(*benchdataDir) - failed := false - for i, tf := range testFiles { - if err := downloadBenchmarkFiles(t, tf.filename); err != nil { - t.Fatalf("failed to download testdata: %s", err) - } - data := readFile(t, filepath.Join(bDir, tf.filename)) - if n := tf.sizeLimit; 0 < n && n < len(data) { - data = data[:n] - } - if err := runTestSameEncodingAsCpp(data); err != nil { - t.Errorf("i=%d: %v", i, err) - failed = true - } - } - if failed { - t.Errorf("was the snappytool program built against the C++ snappy library version " + - "d53de187 or later, commited on 2016-04-05? See " + - "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") - } -} - -// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm -// described in decode_amd64.s and its claim of a 10 byte overrun worst case. 
-func TestSlowForwardCopyOverrun(t *testing.T) { - const base = 100 - - for length := 1; length < 18; length++ { - for offset := 1; offset < 18; offset++ { - highWaterMark := base - d := base - l := length - o := offset - - // makeOffsetAtLeast8 - for o < 8 { - if end := d + 8; highWaterMark < end { - highWaterMark = end - } - l -= o - d += o - o += o - } - - // fixUpSlowForwardCopy - a := d - d += l - - // finishSlowForwardCopy - for l > 0 { - if end := a + 8; highWaterMark < end { - highWaterMark = end - } - a += 8 - l -= 8 - } - - dWant := base + length - overrun := highWaterMark - dWant - if d != dWant || overrun < 0 || 10 < overrun { - t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", - length, offset, d, overrun, dWant) - } - } - } -} - -// TestEncodeNoiseThenRepeats encodes input for which the first half is very -// incompressible and the second half is very compressible. The encoded form's -// length should be closer to 50% of the original length than 100%. -func TestEncodeNoiseThenRepeats(t *testing.T) { - for _, origLen := range []int{256 * 1024, 2048 * 1024} { - src := make([]byte, origLen) - rng := rand.New(rand.NewSource(1)) - firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] - for i := range firstHalf { - firstHalf[i] = uint8(rng.Intn(256)) - } - for i := range secondHalf { - secondHalf[i] = uint8(i >> 8) - } - dst := Encode(nil, src) - if got, want := len(dst), origLen*3/4; got >= want { - t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) - } - } -} - -func TestFramingFormat(t *testing.T) { - // src is comprised of alternating 1e5-sized sequences of random - // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen - // because it is larger than maxBlockSize (64k). - src := make([]byte, 1e6) - rng := rand.New(rand.NewSource(1)) - for i := 0; i < 10; i++ { - if i%2 == 0 { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(rng.Intn(256)) - } - } else { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(i) - } - } - } - - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(src); err != nil { - t.Fatalf("Write: encoding: %v", err) - } - dst, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Fatalf("ReadAll: decoding: %v", err) - } - if err := cmp(dst, src); err != nil { - t.Fatal(err) - } -} - -func TestWriterGoldenOutput(t *testing.T) { - buf := new(bytes.Buffer) - w := NewBufferedWriter(buf) - defer w.Close() - w.Write([]byte("abcd")) // Not compressible. - w.Flush() - w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. - w.Flush() - // The next chunk is also compressible, but a naive, greedy encoding of the - // overall length 67 copy as a length 64 copy (the longest expressible as a - // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte - // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 - // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 - // (of length 60) and a 2-byte tagCopy1 (of length 7). - w.Write(bytes.Repeat([]byte{'B'}, 68)) - w.Write([]byte("efC")) // Not compressible. - w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. - w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. - w.Write([]byte("g")) // Not compressible. - w.Flush() - - got := buf.String() - want := strings.Join([]string{ - magicChunk, - "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). - "\x68\x10\xe6\xb6", // Checksum. 
- "\x61\x62\x63\x64", // Uncompressed payload: "abcd". - "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). - "\x5f\xeb\xf2\x10", // Checksum. - "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. - "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". - "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. - "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. - "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. - "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). - "\x30\x85\x69\xeb", // Checksum. - "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. - "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". - "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. - "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. - "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". - "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. - "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. - "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". - }, "") - if got != want { - t.Fatalf("\ngot: % x\nwant: % x", got, want) - } -} - -func TestEmitLiteral(t *testing.T) { - testCases := []struct { - length int - want string - }{ - {1, "\x00"}, - {2, "\x04"}, - {59, "\xe8"}, - {60, "\xec"}, - {61, "\xf0\x3c"}, - {62, "\xf0\x3d"}, - {254, "\xf0\xfd"}, - {255, "\xf0\xfe"}, - {256, "\xf0\xff"}, - {257, "\xf4\x00\x01"}, - {65534, "\xf4\xfd\xff"}, - {65535, "\xf4\xfe\xff"}, - {65536, "\xf4\xff\xff"}, - } - - dst := make([]byte, 70000) - nines := bytes.Repeat([]byte{0x99}, 65536) - for _, tc := range testCases { - lit := nines[:tc.length] - n := emitLiteral(dst, lit) - if !bytes.HasSuffix(dst[:n], lit) { - t.Errorf("length=%d: did not end with that many literal bytes", tc.length) - continue - } - got := string(dst[:n-tc.length]) - if got != tc.want { - t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) - continue - } - } -} - -func TestEmitCopy(t *testing.T) { - testCases := []struct { - offset int - length int - want string - }{ - {8, 04, "\x01\x08"}, - {8, 11, "\x1d\x08"}, - {8, 12, "\x2e\x08\x00"}, - {8, 13, "\x32\x08\x00"}, - {8, 59, "\xea\x08\x00"}, - {8, 60, "\xee\x08\x00"}, - {8, 61, "\xf2\x08\x00"}, - {8, 62, "\xf6\x08\x00"}, - {8, 63, "\xfa\x08\x00"}, - {8, 64, "\xfe\x08\x00"}, - {8, 65, "\xee\x08\x00\x05\x08"}, - {8, 66, "\xee\x08\x00\x09\x08"}, - {8, 67, "\xee\x08\x00\x0d\x08"}, - {8, 68, "\xfe\x08\x00\x01\x08"}, - {8, 69, "\xfe\x08\x00\x05\x08"}, - {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, - - {256, 04, "\x21\x00"}, - {256, 11, "\x3d\x00"}, - {256, 12, "\x2e\x00\x01"}, - {256, 13, "\x32\x00\x01"}, - {256, 59, "\xea\x00\x01"}, - {256, 60, "\xee\x00\x01"}, - {256, 61, "\xf2\x00\x01"}, - {256, 62, "\xf6\x00\x01"}, - {256, 63, "\xfa\x00\x01"}, - {256, 64, "\xfe\x00\x01"}, - {256, 65, "\xee\x00\x01\x25\x00"}, - {256, 66, "\xee\x00\x01\x29\x00"}, - {256, 67, "\xee\x00\x01\x2d\x00"}, - {256, 68, "\xfe\x00\x01\x21\x00"}, - {256, 69, "\xfe\x00\x01\x25\x00"}, - {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, - - {2048, 04, "\x0e\x00\x08"}, - {2048, 11, "\x2a\x00\x08"}, - {2048, 12, "\x2e\x00\x08"}, - {2048, 13, "\x32\x00\x08"}, - {2048, 59, "\xea\x00\x08"}, - {2048, 60, "\xee\x00\x08"}, - {2048, 61, "\xf2\x00\x08"}, - {2048, 62, "\xf6\x00\x08"}, - {2048, 63, "\xfa\x00\x08"}, - {2048, 64, "\xfe\x00\x08"}, - {2048, 65, "\xee\x00\x08\x12\x00\x08"}, 
- {2048, 66, "\xee\x00\x08\x16\x00\x08"}, - {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, - {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, - {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, - {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, - } - - dst := make([]byte, 1024) - for _, tc := range testCases { - n := emitCopy(dst, tc.offset, tc.length) - got := string(dst[:n]) - if got != tc.want { - t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) - } - } -} - -func TestNewBufferedWriter(t *testing.T) { - // Test all 32 possible sub-sequences of these 5 input slices. - // - // Their lengths sum to 400,000, which is over 6 times the Writer ibuf - // capacity: 6 * maxBlockSize is 393,216. - inputs := [][]byte{ - bytes.Repeat([]byte{'a'}, 40000), - bytes.Repeat([]byte{'b'}, 150000), - bytes.Repeat([]byte{'c'}, 60000), - bytes.Repeat([]byte{'d'}, 120000), - bytes.Repeat([]byte{'e'}, 30000), - } -loop: - for i := 0; i < 1< 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. - data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } -func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } -func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -func BenchmarkRandomEncode(b *testing.B) { - rng := rand.New(rand.NewSource(1)) - data := make([]byte, 1<<20) - for i := range data { - data[i] = uint8(rng.Intn(256)) - } - benchEncode(b, data) -} - -// testFiles' values are copied directly from -// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string - sizeLimit int -}{ - {"html", "html", 0}, - {"urls", "urls.10K", 0}, - {"jpg", "fireworks.jpeg", 0}, - {"jpg_200", "fireworks.jpeg", 200}, - {"pdf", "paper-100k.pdf", 0}, - {"html4", "html_x_4", 0}, - {"txt1", "alice29.txt", 0}, - {"txt2", "asyoulik.txt", 0}, - {"txt3", "lcet10.txt", 0}, - {"txt4", "plrabn12.txt", 0}, - {"pb", "geo.protodata", 0}, - {"gaviota", "kppkn.gtb", 0}, -} - -const ( - // The benchmark data files are at this canonical URL. 
- benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" -) - -func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { - bDir := filepath.FromSlash(*benchdataDir) - filename := filepath.Join(bDir, basename) - if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { - return nil - } - - if !*download { - b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { - return fmt.Errorf("failed to create %s: %s", bDir, err) - } - - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - url := benchURL + basename - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("failed to download %s: %s", url, err) - } - defer resp.Body.Close() - if s := resp.StatusCode; s != http.StatusOK { - return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) - } - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) - } - return nil -} - -func benchFile(b *testing.B, i int, decode bool) { - if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - bDir := filepath.FromSlash(*benchdataDir) - data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) - if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { - data = data[:n] - } - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } - -func BenchmarkExtendMatch(b *testing.B) { - tDir := filepath.FromSlash(*testdataDir) - src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) - if err != nil { - b.Fatalf("ReadFile: %v", err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, tc := range extendMatchGoldenTestCases { - extendMatch(src, tc.i, tc.j) - } - } -} diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt deleted file mode 100644 index 86a18750..00000000 --- a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt +++ /dev/null @@ -1,396 +0,0 @@ -Produced by David Widger. The previous edition was updated by Jose -Menendez. - - - - - - THE ADVENTURES OF TOM SAWYER - BY - MARK TWAIN - (Samuel Langhorne Clemens) - - - - - P R E F A C E - -MOST of the adventures recorded in this book really occurred; one or -two were experiences of my own, the rest those of boys who were -schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but -not from an individual--he is a combination of the characteristics of -three boys whom I knew, and therefore belongs to the composite order of -architecture. - -The odd superstitions touched upon were all prevalent among children -and slaves in the West at the period of this story--that is to say, -thirty or forty years ago. - -Although my book is intended mainly for the entertainment of boys and -girls, I hope it will not be shunned by men and women on that account, -for part of my plan has been to try to pleasantly remind adults of what -they once were themselves, and of how they felt and thought and talked, -and what queer enterprises they sometimes engaged in. - - THE AUTHOR. - -HARTFORD, 1876. - - - - T O M S A W Y E R - - - -CHAPTER I - -"TOM!" - -No answer. - -"TOM!" - -No answer. - -"What's gone with that boy, I wonder? You TOM!" - -No answer. 
- -The old lady pulled her spectacles down and looked over them about the -room; then she put them up and looked out under them. She seldom or -never looked THROUGH them for so small a thing as a boy; they were her -state pair, the pride of her heart, and were built for "style," not -service--she could have seen through a pair of stove-lids just as well. -She looked perplexed for a moment, and then said, not fiercely, but -still loud enough for the furniture to hear: - -"Well, I lay if I get hold of you I'll--" - -She did not finish, for by this time she was bending down and punching -under the bed with the broom, and so she needed breath to punctuate the -punches with. She resurrected nothing but the cat. - -"I never did see the beat of that boy!" - -She went to the open door and stood in it and looked out among the -tomato vines and "jimpson" weeds that constituted the garden. No Tom. -So she lifted up her voice at an angle calculated for distance and -shouted: - -"Y-o-u-u TOM!" - -There was a slight noise behind her and she turned just in time to -seize a small boy by the slack of his roundabout and arrest his flight. - -"There! I might 'a' thought of that closet. What you been doing in -there?" - -"Nothing." - -"Nothing! Look at your hands. And look at your mouth. What IS that -truck?" - -"I don't know, aunt." - -"Well, I know. It's jam--that's what it is. Forty times I've said if -you didn't let that jam alone I'd skin you. Hand me that switch." - -The switch hovered in the air--the peril was desperate-- - -"My! Look behind you, aunt!" - -The old lady whirled round, and snatched her skirts out of danger. The -lad fled on the instant, scrambled up the high board-fence, and -disappeared over it. - -His aunt Polly stood surprised a moment, and then broke into a gentle -laugh. - -"Hang the boy, can't I never learn anything? Ain't he played me tricks -enough like that for me to be looking out for him by this time? But old -fools is the biggest fools there is. Can't learn an old dog new tricks, -as the saying is. But my goodness, he never plays them alike, two days, -and how is a body to know what's coming? He 'pears to know just how -long he can torment me before I get my dander up, and he knows if he -can make out to put me off for a minute or make me laugh, it's all down -again and I can't hit him a lick. I ain't doing my duty by that boy, -and that's the Lord's truth, goodness knows. Spare the rod and spile -the child, as the Good Book says. I'm a laying up sin and suffering for -us both, I know. He's full of the Old Scratch, but laws-a-me! he's my -own dead sister's boy, poor thing, and I ain't got the heart to lash -him, somehow. Every time I let him off, my conscience does hurt me so, -and every time I hit him my old heart most breaks. Well-a-well, man -that is born of woman is of few days and full of trouble, as the -Scripture says, and I reckon it's so. He'll play hookey this evening, * -and [* Southwestern for "afternoon"] I'll just be obleeged to make him -work, to-morrow, to punish him. It's mighty hard to make him work -Saturdays, when all the boys is having holiday, but he hates work more -than he hates anything else, and I've GOT to do some of my duty by him, -or I'll be the ruination of the child." - -Tom did play hookey, and he had a very good time. 
He got back home -barely in season to help Jim, the small colored boy, saw next-day's -wood and split the kindlings before supper--at least he was there in -time to tell his adventures to Jim while Jim did three-fourths of the -work. Tom's younger brother (or rather half-brother) Sid was already -through with his part of the work (picking up chips), for he was a -quiet boy, and had no adventurous, troublesome ways. - -While Tom was eating his supper, and stealing sugar as opportunity -offered, Aunt Polly asked him questions that were full of guile, and -very deep--for she wanted to trap him into damaging revealments. Like -many other simple-hearted souls, it was her pet vanity to believe she -was endowed with a talent for dark and mysterious diplomacy, and she -loved to contemplate her most transparent devices as marvels of low -cunning. Said she: - -"Tom, it was middling warm in school, warn't it?" - -"Yes'm." - -"Powerful warm, warn't it?" - -"Yes'm." - -"Didn't you want to go in a-swimming, Tom?" - -A bit of a scare shot through Tom--a touch of uncomfortable suspicion. -He searched Aunt Polly's face, but it told him nothing. So he said: - -"No'm--well, not very much." - -The old lady reached out her hand and felt Tom's shirt, and said: - -"But you ain't too warm now, though." And it flattered her to reflect -that she had discovered that the shirt was dry without anybody knowing -that that was what she had in her mind. But in spite of her, Tom knew -where the wind lay, now. So he forestalled what might be the next move: - -"Some of us pumped on our heads--mine's damp yet. See?" - -Aunt Polly was vexed to think she had overlooked that bit of -circumstantial evidence, and missed a trick. Then she had a new -inspiration: - -"Tom, you didn't have to undo your shirt collar where I sewed it, to -pump on your head, did you? Unbutton your jacket!" - -The trouble vanished out of Tom's face. He opened his jacket. His -shirt collar was securely sewed. - -"Bother! Well, go 'long with you. I'd made sure you'd played hookey -and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a -singed cat, as the saying is--better'n you look. THIS time." - -She was half sorry her sagacity had miscarried, and half glad that Tom -had stumbled into obedient conduct for once. - -But Sidney said: - -"Well, now, if I didn't think you sewed his collar with white thread, -but it's black." - -"Why, I did sew it with white! Tom!" - -But Tom did not wait for the rest. As he went out at the door he said: - -"Siddy, I'll lick you for that." - -In a safe place Tom examined two large needles which were thrust into -the lapels of his jacket, and had thread bound about them--one needle -carried white thread and the other black. He said: - -"She'd never noticed if it hadn't been for Sid. Confound it! sometimes -she sews it with white, and sometimes she sews it with black. I wish to -geeminy she'd stick to one or t'other--I can't keep the run of 'em. But -I bet you I'll lam Sid for that. I'll learn him!" - -He was not the Model Boy of the village. He knew the model boy very -well though--and loathed him. - -Within two minutes, or even less, he had forgotten all his troubles. -Not because his troubles were one whit less heavy and bitter to him -than a man's are to a man, but because a new and powerful interest bore -them down and drove them out of his mind for the time--just as men's -misfortunes are forgotten in the excitement of new enterprises. 
This -new interest was a valued novelty in whistling, which he had just -acquired from a negro, and he was suffering to practise it undisturbed. -It consisted in a peculiar bird-like turn, a sort of liquid warble, -produced by touching the tongue to the roof of the mouth at short -intervals in the midst of the music--the reader probably remembers how -to do it, if he has ever been a boy. Diligence and attention soon gave -him the knack of it, and he strode down the street with his mouth full -of harmony and his soul full of gratitude. He felt much as an -astronomer feels who has discovered a new planet--no doubt, as far as -strong, deep, unalloyed pleasure is concerned, the advantage was with -the boy, not the astronomer. - -The summer evenings were long. It was not dark, yet. Presently Tom -checked his whistle. A stranger was before him--a boy a shade larger -than himself. A new-comer of any age or either sex was an impressive -curiosity in the poor little shabby village of St. Petersburg. This boy -was well dressed, too--well dressed on a week-day. This was simply -astounding. His cap was a dainty thing, his close-buttoned blue cloth -roundabout was new and natty, and so were his pantaloons. He had shoes -on--and it was only Friday. He even wore a necktie, a bright bit of -ribbon. He had a citified air about him that ate into Tom's vitals. The -more Tom stared at the splendid marvel, the higher he turned up his -nose at his finery and the shabbier and shabbier his own outfit seemed -to him to grow. Neither boy spoke. If one moved, the other moved--but -only sidewise, in a circle; they kept face to face and eye to eye all -the time. Finally Tom said: - -"I can lick you!" - -"I'd like to see you try it." - -"Well, I can do it." - -"No you can't, either." - -"Yes I can." - -"No you can't." - -"I can." - -"You can't." - -"Can!" - -"Can't!" - -An uncomfortable pause. Then Tom said: - -"What's your name?" - -"'Tisn't any of your business, maybe." - -"Well I 'low I'll MAKE it my business." - -"Well why don't you?" - -"If you say much, I will." - -"Much--much--MUCH. There now." - -"Oh, you think you're mighty smart, DON'T you? I could lick you with -one hand tied behind me, if I wanted to." - -"Well why don't you DO it? You SAY you can do it." - -"Well I WILL, if you fool with me." - -"Oh yes--I've seen whole families in the same fix." - -"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" - -"You can lump that hat if you don't like it. I dare you to knock it -off--and anybody that'll take a dare will suck eggs." - -"You're a liar!" - -"You're another." - -"You're a fighting liar and dasn't take it up." - -"Aw--take a walk!" - -"Say--if you give me much more of your sass I'll take and bounce a -rock off'n your head." - -"Oh, of COURSE you will." - -"Well I WILL." - -"Well why don't you DO it then? What do you keep SAYING you will for? -Why don't you DO it? It's because you're afraid." - -"I AIN'T afraid." - -"You are." - -"I ain't." - -"You are." - -Another pause, and more eying and sidling around each other. Presently -they were shoulder to shoulder. Tom said: - -"Get away from here!" - -"Go away yourself!" - -"I won't." - -"I won't either." - -So they stood, each with a foot placed at an angle as a brace, and -both shoving with might and main, and glowering at each other with -hate. But neither could get an advantage. After struggling till both -were hot and flushed, each relaxed his strain with watchful caution, -and Tom said: - -"You're a coward and a pup. 
I'll tell my big brother on you, and he -can thrash you with his little finger, and I'll make him do it, too." - -"What do I care for your big brother? I've got a brother that's bigger -than he is--and what's more, he can throw him over that fence, too." -[Both brothers were imaginary.] - -"That's a lie." - -"YOUR saying so don't make it so." - -Tom drew a line in the dust with his big toe, and said: - -"I dare you to step over that, and I'll lick you till you can't stand -up. Anybody that'll take a dare will steal sheep." - -The new boy stepped over promptly, and said: - -"Now you said you'd do it, now let's see you do it." - -"Don't you crowd me now; you better look out." - -"Well, you SAID you'd do it--why don't you do it?" - -"By jingo! for two cents I WILL do it." - -The new boy took two broad coppers out of his pocket and held them out -with derision. Tom struck them to the ground. In an instant both boys -were rolling and tumbling in the dirt, gripped together like cats; and -for the space of a minute they tugged and tore at each other's hair and -clothes, punched and scratched each other's nose, and covered -themselves with dust and glory. Presently the confusion took form, and -through the fog of battle Tom appeared, seated astride the new boy, and -pounding him with his fists. "Holler 'nuff!" said he. - -The boy only struggled to free himself. He was crying--mainly from rage. - -"Holler 'nuff!"--and the pounding went on. - -At last the stranger got out a smothered "'Nuff!" and Tom let him up -and said: - -"Now that'll learn you. Better look out who you're fooling with next -time." - -The new boy went off brushing the dust from his clothes, sobbing, -snuffling, and occasionally looking back and shaking his head and -threatening what he would do to Tom the "next time he caught him out." -To which Tom responded with jeers, and started off in high feather, and -as soon as his back was turned the new boy snatched up a stone, threw -it and hit him between the shoulders and then turned tail and ran like -an antelope. Tom chased the traitor home, and thus found out where he -lived. He then held a position at the gate for some time, daring the -enemy to come outside, but the enemy only made faces at him through the -window and declined. At last the enemy's mother appeared, and called -Tom a bad, vicious, vulgar child, and ordered him away. So he went -away; but he said he "'lowed" to "lay" for that boy. - -He got home pretty late that night, and when he climbed cautiously in -at the window, he uncovered an ambuscade, in the person of his aunt; -and when she saw the state his clothes were in her resolution to turn -his Saturday holiday into captivity at hard labor became adamantine in -its firmness. 
diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy deleted file mode 100644 index 9c56d985888e48a9967e187523f0e3326330aeca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9871 zcmW++d3+ni*`6u8u)A7~rM0|~WyP6QqBu?@lY=8Ar;;dGG#9aRhm?^tk~Wrh#qP>N zQ1~GlQvw7CE&Q640B$iMX|9tMwyz{)z z`#jI+Pu3f296Mjj@jT5o=rT5F=II7AU*t{??Jsd!b@-rZ*Idf;rf1p~tuvR_s(I#d zarWGEY?mu5xy7wKzoT}^s4@KYtwyn^>W(3dL`{kZP=7vyJ{wy zjghcqlQS7jS(#e zmZs@)nxac-T2WT6?(3&^fqJk`mLKGnS97>a9iFqDJZ#9c;8&(gv$j0|KV{`|gW9=V z&2e~s9<|~5wxf7;o7TC*DZjAF9g|x*cmzDO0)Jb#690QRJk^6QV6)f`XYGbd>m~Wj zDT<_YJpY-0H!a>nduzD?y4AHM2i^#*q+E4y}r zW^0~e*`7RQF@yiG@+MTj#>&ZLiQ{Ec7|q$0tZ6aN_|*UD)?A4>Ea$-*UC+#ouTk+z z4(Dw@o>`$8I$-g7f-);_bS-vx!G;}WdD*s#F7<5OhPQ;PwrhHv8gr3@d!x+BmEhZ) zu~zC)l+;Dj?0lZOHK>8>r7yyZ617|jYFMj|pSiJ2g6gGzyoQyb%9K^$xfK4toOigT z;2mMN%m$%u@x7GO4dc~fo;YM&BG`HPwbP&$u4g-qWYR0aX3z@HrY>tFVEv}!L7g31 zu(7mcktmzOUc|f?epI z^F!FaQZ${in}CXx>?)_GN9$&}OrgwB59*d`mc_X#0j%&$iPmkbNU%fkuEi{uvul=@ zP@s0S${Y`GoewXsmf>tl0QxM4e|cW=oCZE+W3|kg%PbEN+Ga;}X>V#xe|jy=C`t_a(ugU_DjSgFLL2mK z4x#7<*g=;|i|iw{ZhEEe{k?$s1cj2;ZD@375#40hhVjC+D1G9xa-No19_mypmjN#& z0JmG^IgjPau;RR3&q!0s(17qedyuEIp$cVLyOw?}q&T)+nP(Iwi@L*4$rIEHpe=q+ z@GQ0F(Hg!EPoG)op?%QYP`$ieY3>&+jyL&dg!_Bfu3mR`uh^Y_Dc}N@D(n)(n#aQc zN;oKn-x2Gmk(qxQEJkrXpei^|)7kmw0Ms& zo`^9CI-5I#W{xM5_&Mmp8f>z}2Jzjg*92&}9f*k>Z=MOD9^}cgnRn^-8v92SXn~i@ zJqjN4AED4l0Fn!NUbG|zs9op3TJTE7T$P#m1io|%EtuTN@p2EhWS)LbhRK1t>}){`}ZwY8IH<7+mLvV9nGBvc;u>S2GT(0A)oySKzL=B-0TES{&!n+}OV)XfJFT8tzeP zj(IE~WN79L{2R&TTWWiOi)}Y6RoAv+P-LEO@(}ajWUSEzsD>XfZeLbp!1-1W z1tFRB_$ZJEN@c^b;X{&J zvhlZ#4?~-hcCwbNy(;vVMt^MFGm}xbY!*u%@QCRednG8H`6_I{AqwP@hMbNLYRo!2so{4J6pevxa zjML`=1_0B*e{K&=S^4<_FLm|a;U^s~zr)8k#R+k3@!zN242`Ep@ zU9b#Kka}olc$>PNRgy>VLS3X?7MPcs5isG2%a7h}EL2@US#xq201OaR*uXf_@HVN; zJ+Xn)5)8HBJENH{7$XCwg&%_l_8Mv9GQ26|0*~ZM_{!m%Wg^hhaP9DY>GcxyB)%5$ z=6b;_$B)m?b07`}x;2@E9jADog<@HA1}vb#o4@WN`!_A>K2;1SbmO z0DzM$jL&_sNELtrt5!L~f5NW|)J)BldQDe6Akj58c}_8ZB)GNrev9&}z9~X;*aOhW zfKdkB2(tj+EWyM|U7_}ZfDv~b4G1}AT)+Et6^4bwENtgp1)DEbjJZs?IW}5(G>V~luD6joYPJOOrWTDF?bA3?D;v(A1 zX_s*q(R{LjwvWE2!c0?0$%if2GWlFaj%F#S(sZ>eea~DbR2r$XLNGYS`TZ+A3zv{P zdbN`@wFkKOVg@pkNe~@NuVMS&d zFG+ct!Mg(h=cyKwi60X597F0bK*w? z*Y5r{d0%c@FzY99H0ck?Z>5k&E4J%t2}vD*Ee`LNU}1keAnsS0E*z+@iO_? 
zPgckQ(98oo5|O8HknDh_1f8Bvs9UDfzR{vk=f+lK8NZlGW2^v0uxz0G^E6jBXGKB3 z+1WDZtwaUT2q7C?^*=YB~K`+zEH`ALC*9o3L8Qt?T;;Zi3%lgQokTi`w{nt5xYC8 zQeJkA`GS>y=BRY`>VAAVFmt-aXkL;@bxDft$e*KZC5mcGUTu*;93MU^*p;zqL;I7w z`&w~m_@C)g^VM|4xG{Yj%987)X(f<$N|rew1IW{o>Vl{oy*6rtI4M`@IjNr+qW-+3 z=IpXPS~={S@3J~H^@Uy%TC~em>TDl=Lv^cVGxJplC^ug=NjiYCJD;5bLLRs@nPlGZ zEy#^8CKdVaM+8fO(=fuS2U{m3f)@)<(C|~ zB?Ow+wQD3DBnyM^er>uOs)B4AWcW#(3xPc|`Jk5aBL&9-Ak6G~r{M>&=;@0!{3A8b zoPl^2h`}YtH1cIAnm4Ou5JkC)8g+p@%b-5_zrrAbJYK1iPF1{cWO~4JK*^6@9R-_~ z2kV|ABk=g2Cq=4ksXTQR0>@PEz=brY4aIZ7zWT3K0nNo zF!L*wkyYbQ#@o~s&6-+8Q9(%M#?BMm(&6heN22gMz$o>|p4Mg~50ccb0Ev1Gx*U!a zh&Tt4Lyyu+?r9-MTLqp!FEchDF><*gfp<0@1F*`z)COcla;iZ4nhOvmzxEju1FP(= zO7gh|iM6IT2$kB@VIN+sWqc|?Gl$FMWQBawM{ZE#uV@cyLch&kKrZ=9Yvm+?nIa<( z$;JfzCP@NOY77V!s_j*g<_hvEy&d+1oc1_G3e&tB_@b||# zYcRc6>a#22`C*7{sx^SdcQnDG+DHBnG^U$OlB;xRX7+?sr45oHLLb*=l6`aWGiXq& z2iXY~)a^jqOU4xPh>xwu?`H;AC^^%~)hh5Z%QIP-0%C*75Yo<-wUn%#XxXD=7|Nukhl}PQrwyLoMjrJ=hUIaM z+U1b_fw9~1ivgy%aDMA#kV5SI(J1?sw#64Rq3&RVz6mO1s&=m*me${!>C@Lr5w#V+ zHMR~Y4CYrO^@lJ-BSn@oHBXNJCy)cDV2_PNNwkC1QxUqAK`*()_a z5bCVyAzM4xMs~FZz^$ZQjY{~)P-^EkEz=smVCTy!tRRo8WPy(i`o)Lk3}RX~C}gbR zze=pY$qA?u7Y8MNA!EM>sd+0203wQ9n;jyT>jBXExzc;bBnY}lgBV-2oF9a9^|?1T z9Xh~_YsmRBESYA>J=)ZRdH}e@wu0M`l6)C+iQkug34by^zk29W+bW1VB-1mrIX*I75nN>$_=>|5$c}cf@bV7% z5b2l2sYCb9o{Id}B_I|7uMJF~6gk<{jv9D6*{G@Tf`R78_^9zX>Jpz8lF8227HNQU z{f9xpsc8%RQWu1qZb-?1|Muc{cRwPq3O*$MI`tNNK`58We}G)AvC~qgp` zE>BMnMl$P->1>&}hBS#V*hrrwWX;2XW(g0lW^ zA3jA~gsPnrWtgDXbdV2%D68lF0iN_h;4HaV}dus{Sp|S9<5{aVF|h(>uooK0AMg} z%Qa4p`BWHyp0>_E4l36p2YqBSVLKyx{rLKj!g8QM$PJSA)kOTIRCMgjlc<#GnEFD1 zyiCaR6UhyF0Ei#n{)!&0InZ%EN=BbIf!q?%u9`5PLEHmrg2@~hb6_XQ!O%-^6H!7{GO?3%NPwquNCM|hZVaP@_<0m=0boF}VrKdOlFN~Q zYLB&y-eP>OeP86sNs?Qu;GZ_5+QTi{(GKzhi7XAK-^BBX<3|a7wh~waffm>Hb&fru zLiU{_0mYb-IiW(XpEAzV{-uklh(92dOzldwd3apOl*rrC;Rlhkvnvc%1X9WZUKjun zCUfx=3?$a&*?81nfom1lg&_i3%rVK&D8C%Puie$EmQ85F zcho?MWwVW+ZN(c->jPe0!@&u;*_u-{|AueYE+!-AqdeYB9{Ha{(T}LXRp(5syGQ6A`gkVr-fRLzZj=$FSetkeuDx+nP#C+ za0s|o(asO+b0T=FVDST!RY+O9&Z7W@?t5@Asv2L0AkVejp0WDy-$00huT*U7naRlP zejZ|r<5XDgke4*E+xRzmJwT!g9BgjDGsjj-H0x+PB9S+JA;-*SZA+YqHvw%x1Dget z{K+SnPNYjBtz`655OTLhi6y0I6ewlT^jLXhkpwE6m@zU7#Db?C4o8SAf&C%LiR9l3 zxuhlYJVYf$@_>K%IaEu%O$LJ_mkYM5ebIqGlL|1D``X67VH}UYBbYq$lc@0t_Tz5$ zSqO5n0tk=KqHrycZ#v?A)TY1zVBlA+JTG-C1lr(O!T1*rze~3ID_L!VBnMH_cmOH! 
zRUsEL<+@2eZY9_2u2P~|O5`kA8M*JNI-at0~z1Eb-f*4>Yp+rJHc`lgEppI5)3;p`h2||L( zA8YR`XcM`r1BXS@(}l!fdk4`BEf^8Q(o$xLyR=K&u_C0bv$h7HX%1MkM#SsYg8 z460yWC|(=x0~_Tv4Dma&zk$`h)hpAH9sY^`HIgvL;3T6HQpUL{m6mH2vc?By0CGlZ zcHf|H>P){fE^HqmC{ON?@gd}COFAfA6`Gy`G0o23cU~QAdW6H|g~K1tLUH0pC^#Va z6J9K42-?(%TS!a}?lKO+n|`AqUQvQ^RA_!p{4(z0_X~NJzC|pMt7TM;x2manGAXt~ zl2(VSC2?b*kHPJm|79pI6fC`6J2gCdv36rXb(!mCo{$$KORxFKAE^YFGTJw(ROqtg z1Q`v8w$V3!8@hD$y0v|292Jj=9Scsh2hN%Ijp1&=TBN5a%J0;)zo{W8bY|7rZT+Ov zCoNJo^ejB-+>23$J=*z*uXAHSEjY}SZ&x!km0>4MY)#!QFeh^n%ILpr6>pyyMyc*A zcFxfskPjyz;@TZBIt2h=sSUBCVQ8xPp##^Wo~hrLhR=z-I)!{{pfYik;mN4Pu7qr} zX9ff+D05RAxnzQxwY?I401c*U;{2fvII8WR z*4-K?f}!Txdr>%Ux%kWtcovASJrV6Dh`Fr;e~nxjCofEqfbwIVN*LzIc%(&cmmw!E zlKm>8+N?PKE>g7r%4DH#xKg29BVw!nMKG#mMy`#ItqH-n1;U1b46#-3L~=xzK#`H_ zefqu8p6$4{A&|4dt?P+E08~h=8lf%P*X_fH1hW|XWxIY`t9&Bzac2k=G~|WG9=U;c zM&6K7fjp~4X0&TpwZz`X5wuiS{0zecfm10>ElE8Wh~q&}wf*7lE90C0@O=98;ajyRoiKC+x_gUQD0RJ&M~;D5DW=*DSbkWrPYSMK~si3DV6 zm0cI>7!115l{b!%`2?=H>SW_2a&;%-dYc^odZ>(+#S`K^Al&Nt;Tu)>{X9|AKL`P2 zU(?A{MVvXj=DiQqzEoxz?h2Mb)1I+G6!Y2^i7si-?J$~cBfXKF9t-5)2EqOCTDcpa z*Yi&VSbUx0889$yFgQ88N>(ND_lA$9-V&-d`LWJ-LwsJ=?oXl$rZ9zG0A@Ls1U92V?p;7Lcu1xJMa*DRDf576QoN+MdKZ{Xl0wQO55_b@O7-ebv<#n2c4HNt*rdMVrxA^Z57Ycxg znj5=ci@z`a$H=ccTXpShmQ-2<#Wjj}Tl`0GubP8vr!O_z&%VGHYB2-<5kfvD2JyGZ zr7~_y>Ej8i_DpQ#`0z1s{+lKCXBbHTAn~mzh9B0B_;C#7$Y5Mw8yD;v`!D2UWO`7@ zmE!Y&mfR|-_C%zFx*a~I5ZOAJOm0b?hu-`#W&p`4RGkMpjNDuTCbP zXrwz3Ikm-jXV&c4ZPUoM98(TnH9m35bGj7MaakS-vcqYIE=-E%xa!Rw$9Q&olHKSOfH?H zdhkIS4H@EOaC5J#^2OShQvlzd2G`Q$x*q+oEYw^L6sHmy4tD#r*Ceu#>N6%8pM)!* z^>Qpv9w&N98hwnood`ayL>A9L^M*oVQG2`{G5y6bNN(fl)I*iK^xtWzV*=dNP7v&} zwo1WcsX<8WW^~>k2g1KGu9vhBcn!}SB( Note: gorilla/context, having been born well before `context.Context` existed, does not play well +> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. + Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context_test.go b/vendor/github.com/gorilla/context/context_test.go deleted file mode 100644 index 9814c501..00000000 --- a/vendor/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
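The gorilla/context note added above recommends the standard `http.Request.Context()` (Go 1.7+) over gorilla/context's request-keyed global map when you are not mixing the two. As a rough, editorial sketch of that migration for a middleware that attaches a per-request value (the key type, the `req-123` value, and the handler names below are made up for illustration, not taken from this patch or from gorilla's API):

```go
// Illustrative sketch: request-scoped values via http.Request.Context()
// instead of gorilla/context's context.Set(r, key, val) / context.Get(r, key).
package main

import (
	"context"
	"fmt"
	"net/http"
)

// ctxKey is an unexported key type so values set here cannot collide with
// values other packages store in the same context.
type ctxKey int

const requestIDKey ctxKey = 0

// withRequestID stores a value on the request's own context rather than in
// a package-level map keyed by *http.Request.
func withRequestID(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), requestIDKey, "req-123") // hypothetical ID
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func handler(w http.ResponseWriter, r *http.Request) {
	// Equivalent of gorilla's context.Get(r, key): read from r.Context().
	if id, ok := r.Context().Value(requestIDKey).(string); ok {
		fmt.Fprintf(w, "request id: %s\n", id)
	}
}

func main() {
	http.ListenAndServe(":8080", withRequestID(http.HandlerFunc(handler)))
}
```

Because `WithContext` shallow-copies the request, a handler that reads the value through the old gorilla map and one that reads it through `r.Context()` would be looking at different requests, which is exactly the mismatch the note warns about; pick one mechanism per code path.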
- -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // GetAll() - values := GetAll(r) - assertEqual(len(values), 3) - - // GetAll() for empty request - values = GetAll(emptyR) - if values != nil { - t.Error("GetAll didn't return nil value for invalid request") - } - - // GetAllOk() - values, ok = GetAllOk(r) - assertEqual(len(values), 3) - assertEqual(ok, true) - - // GetAllOk() for empty request - values, ok = GetAllOk(emptyR) - assertEqual(value, nil) - assertEqual(ok, false) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} - -func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Set(r, key, value) - } - done <- struct{}{} - -} - -func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { - - b.StopTimer() - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - done := make(chan struct{}) - b.StartTimer() - - for i := 0; i < b.N; i++ { - wait := make(chan struct{}) - - for i := 0; i < numReaders; i++ { - go parallelReader(r, "test", iterations, wait, done) - } - - for i := 0; i < numWriters; i++ { - go parallelWriter(r, "test", "123", iterations, wait, done) - } - - close(wait) - - for i := 0; i < numReaders+numWriters; i++ { - <-done - } - - } - -} - -func BenchmarkMutexSameReadWrite1(b *testing.B) { - benchmarkMutex(b, 1, 1, 32) -} -func BenchmarkMutexSameReadWrite2(b *testing.B) { - benchmarkMutex(b, 2, 2, 32) -} -func BenchmarkMutexSameReadWrite4(b *testing.B) { - benchmarkMutex(b, 4, 4, 32) -} -func BenchmarkMutex1(b *testing.B) { - benchmarkMutex(b, 2, 8, 32) -} -func BenchmarkMutex2(b *testing.B) { - benchmarkMutex(b, 16, 4, 64) -} -func BenchmarkMutex3(b *testing.B) { - benchmarkMutex(b, 1, 2, 128) -} -func BenchmarkMutex4(b *testing.B) { - benchmarkMutex(b, 128, 32, 256) -} -func BenchmarkMutex5(b *testing.B) { - benchmarkMutex(b, 1024, 2048, 64) -} -func BenchmarkMutex6(b *testing.B) { - benchmarkMutex(b, 2048, 1024, 512) -} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go index 73c74003..448d1bfc 100644 --- a/vendor/github.com/gorilla/context/doc.go +++ b/vendor/github.com/gorilla/context/doc.go @@ -5,6 +5,12 @@ /* Package context stores values shared during a 
request lifetime. +Note: gorilla/context, having been born well before `context.Context` existed, +does not play well > with the shallow copying of the request that +[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) +(added to net/http Go 1.7 onwards) performs. You should either use *just* +gorilla/context, or moving forward, the new `http.Request.Context()`. + For example, a router can set variables extracted from the URL and later application handlers can access those values, or it can be used to store sessions values to be saved at the end of a request. There are several diff --git a/vendor/github.com/gorilla/mux/bench_test.go b/vendor/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index 522156dc..00000000 --- a/vendor/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} - -func BenchmarkMuxAlternativeInRegexp(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1:(?:a|b)}", handler) - - requestA, _ := http.NewRequest("GET", "/v1/a", nil) - requestB, _ := http.NewRequest("GET", "/v1/b", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, requestA) - router.ServeHTTP(nil, requestB) - } -} - -func BenchmarkManyPathVariables(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}/{v2}/{v3}/{v4}/{v5}", handler) - - matchingRequest, _ := http.NewRequest("GET", "/v1/1/2/3/4/5", nil) - notMatchingRequest, _ := http.NewRequest("GET", "/v1/1/2/3/4", nil) - recorder := httptest.NewRecorder() - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, matchingRequest) - router.ServeHTTP(recorder, notMatchingRequest) - } -} diff --git a/vendor/github.com/gorilla/mux/context_gorilla_test.go b/vendor/github.com/gorilla/mux/context_gorilla_test.go deleted file mode 100644 index ffaf384c..00000000 --- a/vendor/github.com/gorilla/mux/context_gorilla_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build !go1.7 - -package mux - -import ( - "net/http" - "testing" - - "github.com/gorilla/context" -) - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} diff --git a/vendor/github.com/gorilla/mux/context_native_test.go 
b/vendor/github.com/gorilla/mux/context_native_test.go deleted file mode 100644 index c150edf0..00000000 --- a/vendor/github.com/gorilla/mux/context_native_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build go1.7 - -package mux - -import ( - "context" - "net/http" - "testing" - "time" -) - -func TestNativeContextMiddleware(t *testing.T) { - withTimeout := func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithTimeout(r.Context(), time.Minute) - defer cancel() - h.ServeHTTP(w, r.WithContext(ctx)) - }) - } - - r := NewRouter() - r.Handle("/path/{foo}", withTimeout(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - vars := Vars(r) - if vars["foo"] != "bar" { - t.Fatal("Expected foo var to be set") - } - }))) - - rec := NewRecorder() - req := newRequest("GET", "/path/bar") - r.ServeHTTP(rec, req) -} diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index 484fab43..00000000 --- a/vendor/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,1912 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "net/http" - "net/url" - "reflect" - "strings" - "testing" -) - -func (r *Route) GoString() string { - matchers := make([]string, len(r.matchers)) - for i, m := range r.matchers { - matchers[i] = fmt.Sprintf("%#v", m) - } - return fmt.Sprintf("&Route{matchers:[]matcher{%s}}", strings.Join(matchers, ", ")) -} - -func (r *routeRegexp) GoString() string { - return fmt.Sprintf("&routeRegexp{template: %q, matchHost: %t, matchQuery: %t, strictSlash: %t, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v", r.template, r.matchHost, r.matchQuery, r.strictSlash, r.regexp.String(), r.reverse, r.varsN, r.varsR) -} - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - scheme string // the expected scheme of the built URL - host string // the expected host of the built URL - path string // the expected path of the built URL - query string // the expected query string of the built URL - pathTemplate string // the expected path template of the route - hostTemplate string // the expected host template of the route - methods []string // the expected route methods - pathRegexp string // the expected path regexp - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: 
"aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with pattern, additional capturing group, match", - route: new(Route).Host("aaa.{v1:[a-z]{2}(?:b|c)}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v1:[a-z]{2}(?:b|c)}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: false, - }, - { - title: "Host route with hyphenated name and pattern, match", - route: new(Route).Host("aaa.{v-1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - 
hostTemplate: `aaa.{v-1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with hyphenated name and pattern, additional capturing group, match", - route: new(Route).Host("aaa.{v-1:[a-z]{2}(?:b|c)}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `aaa.{v-1:[a-z]{2}(?:b|c)}.ccc`, - shouldMatch: true, - }, - { - title: "Host route with multiple hyphenated names and patterns, match", - route: new(Route).Host("{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "aaa", "v-2": "bbb", "v-3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - hostTemplate: `{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}`, - shouldMatch: true, - }, - } - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - pathTemplate: `/111/`, - pathRegexp: `^/111/$`, - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", - route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - pathTemplate: `/111`, - shouldMatch: false, - }, - { - title: "Path route, match root with no host", - route: new(Route).Path("/"), - request: newRequest("GET", "/"), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - pathRegexp: `^/$`, - shouldMatch: true, - }, - { - title: "Path route, match root with no host, App Engine format", - route: new(Route).Path("/"), - request: func() *http.Request { - r := newRequest("GET", "http://localhost/") - r.RequestURI = "/" - return r - }(), - vars: map[string]string{}, - host: "", - path: "/", - pathTemplate: `/`, - shouldMatch: true, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - pathTemplate: `/111/{v1:[0-9]{3}}/333`, - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - pathTemplate: `/111/{v1:[0-9]{3}}/333`, - pathRegexp: `^/111/(?P[0-9]{3})/333$`, - shouldMatch: false, - }, - { - title: "Path route with 
multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}`, - pathRegexp: `^/(?P[0-9]{3})/(?P[0-9]{3})/(?P[0-9]{3})$`, - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}`, - pathRegexp: `^/(?P[0-9]{3})/(?P[0-9]{3})/(?P[0-9]{3})$`, - shouldMatch: false, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|(?:b/c)}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - pathTemplate: `/{category:a|(?:b/c)}/{product}/{id:[0-9]+}`, - pathRegexp: `^/(?Pa|(?:b/c))/(?P[^/]+)/(?P[0-9]+)$`, - shouldMatch: true, - }, - { - title: "Path route with hyphenated name and pattern, match", - route: new(Route).Path("/111/{v-1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v-1": "222"}, - host: "", - path: "/111/222/333", - pathTemplate: `/111/{v-1:[0-9]{3}}/333`, - pathRegexp: `^/111/(?P[0-9]{3})/333$`, - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns, match", - route: new(Route).Path("/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v-1": "111", "v-2": "222", "v-3": "333"}, - host: "", - path: "/111/222/333", - pathTemplate: `/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}`, - pathRegexp: `^/(?P[0-9]{3})/(?P[0-9]{3})/(?P[0-9]{3})$`, - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns with pipe, match", - route: new(Route).Path("/{product-category:a|(?:b/c)}/{product-name}/{product-id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"product-category": "a", "product-name": "product_name", "product-id": "1"}, - host: "", - path: "/a/product_name/1", - pathTemplate: `/{product-category:a|(?:b/c)}/{product-name}/{product-id:[0-9]+}`, - pathRegexp: `^/(?Pa|(?:b/c))/(?P[^/]+)/(?P[0-9]+)$`, - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns with pipe and case insensitive, match", - route: new(Route).Path("/{type:(?i:daily|mini|variety)}-{date:\\d{4,4}-\\d{2,2}-\\d{2,2}}"), - request: newRequest("GET", "http://localhost/daily-2016-01-01"), - vars: map[string]string{"type": "daily", "date": "2016-01-01"}, - host: "", - path: "/daily-2016-01-01", - pathTemplate: `/{type:(?i:daily|mini|variety)}-{date:\d{4,4}-\d{2,2}-\d{2,2}}`, - pathRegexp: `^/(?P(?i:daily|mini|variety))-(?P\d{4,4}-\d{2,2}-\d{2,2})$`, - shouldMatch: true, - }, - { - title: "Path route with empty match right after other match", - route: new(Route).Path(`/{v1:[0-9]*}{v2:[a-z]*}/{v3:[0-9]*}`), - request: newRequest("GET", "http://localhost/111/222"), - vars: map[string]string{"v1": "111", "v2": "", "v3": 
"222"}, - host: "", - path: "/111/222", - pathTemplate: `/{v1:[0-9]*}{v2:[a-z]*}/{v3:[0-9]*}`, - pathRegexp: `^/(?P[0-9]*)(?P[a-z]*)/(?P[0-9]*)$`, - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/a"), - vars: map[string]string{"category": "a"}, - host: "", - path: "/a", - pathTemplate: `/{category:a|b/c}`, - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/b/c"), - vars: map[string]string{"category": "b/c"}, - host: "", - path: "/b/c", - pathTemplate: `/{category:a|b/c}`, - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - pathTemplate: `/{category:a|b/c}/{product}/{id:[0-9]+}`, - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/b/c/product_name/1"), - vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, - host: "", - path: "/b/c/product_name/1", - pathTemplate: `/{category:a|b/c}/{product}/{id:[0-9]+}`, - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - testRegexp(t, test) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - route: new(Route).PathPrefix("/1"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/111/{v1:[0-9]{3}}`, - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/111/{v1:[0-9]{3}}`, - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}`, - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple 
patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - pathTemplate: `/{v1:[0-9]{3}}/{v2:[0-9]{3}}`, - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - } -} - -func TestSchemeHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/222/333`, - hostTemplate: `aaa.bbb.ccc`, - shouldMatch: true, - }, - { - title: "Scheme, Host, and Path route, match", - route: new(Route).Schemes("https").Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "https://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - scheme: "https", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/222/333`, - hostTemplate: `aaa.bbb.ccc`, - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/222/333`, - hostTemplate: `aaa.bbb.ccc`, - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/{v2:[0-9]{3}}/333`, - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Scheme, Host, and Path route with host and path patterns, match", - route: new(Route).Schemes("ftp", "ssss").Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "ssss://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - scheme: "ftp", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/{v2:[0-9]{3}}/333`, - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/111/{v2:[0-9]{3}}/333`, - hostTemplate: `aaa.{v1:[a-z]{3}}.ccc`, - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}`, - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: true, - 
}, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - scheme: "http", - host: "aaa.bbb.ccc", - path: "/111/222/333", - pathTemplate: `/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}`, - hostTemplate: `{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}`, - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).Headers("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).HeadersRegexp("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - } -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{"GET", "POST"}, - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{"GET", "POST"}, - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{"GET", "POST"}, - shouldMatch: false, - }, - { - title: "Route without methods", - route: new(Route), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - methods: []string{}, - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - testMethods(t, test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, 
match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=bar&baz=ding", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=bar&baz=ding", - pathTemplate: `/api`, - hostTemplate: `www.example.com`, - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=bar&baz=ding", - pathTemplate: `/api`, - hostTemplate: `www.example.com`, - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v1": "bar"}, - host: "", - path: "", - query: "foo=bar", - shouldMatch: true, - }, - { - title: "Queries route with multiple patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - query: "foo=bar&baz=ding", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - query: "foo=10", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - query: "foo=1", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?bar=2&foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - query: "foo=1", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, additional capturing group", - route: new(Route).Queries("foo", "{v1:[0-9]{1}(?:a|b)}"), - request: newRequest("GET", "http://localhost?foo=1a"), - vars: map[string]string{"v1": "1a"}, 
- host: "", - path: "", - query: "foo=1a", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with hyphenated name, match", - route: new(Route).Queries("foo", "{v-1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v-1": "bar"}, - host: "", - path: "", - query: "foo=bar", - shouldMatch: true, - }, - { - title: "Queries route with multiple hyphenated names, match", - route: new(Route).Queries("foo", "{v-1}", "baz", "{v-2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v-1": "bar", "v-2": "ding"}, - host: "", - path: "", - query: "foo=bar&baz=ding", - shouldMatch: true, - }, - { - title: "Queries route with hyphenate name and pattern, match", - route: new(Route).Queries("foo", "{v-1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v-1": "10"}, - host: "", - path: "", - query: "foo=10", - shouldMatch: true, - }, - { - title: "Queries route with hyphenated name and pattern with quantifier, additional capturing group", - route: new(Route).Queries("foo", "{v-1:[0-9]{1}(?:a|b)}"), - request: newRequest("GET", "http://localhost?foo=1a"), - vars: map[string]string{"v-1": "1a"}, - host: "", - path: "", - query: "foo=1a", - shouldMatch: true, - }, - { - title: "Queries route with empty value, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=", - shouldMatch: true, - }, - { - title: "Queries route with empty value and no parameter in request, should not match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with empty value and empty parameter in request, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{}, - host: "", - path: "", - query: "foo=", - shouldMatch: true, - }, - { - title: "Queries route with overlapping value, should not match", - route: new(Route).Queries("foo", "bar"), - request: newRequest("GET", "http://localhost?foo=barfoo"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with no parameter in request, should not match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with empty parameter in request, should match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{"foo": ""}, - host: "", - path: "", - query: "foo=", - shouldMatch: true, - }, - { - title: "Queries route, bad submatch", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?fffoo=bar&baz=dingggg"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match, escaped value", - route: new(Route).Queries("foo", 
"{v1}"), - request: newRequest("GET", "http://localhost?foo=%25bar%26%20%2F%3D%3F"), - vars: map[string]string{"v1": "%bar& /=?"}, - host: "", - path: "", - query: "foo=%25bar%26+%2F%3D%3F", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, default scheme, match http, build http", - route: new(Route).Host("localhost"), - request: newRequest("GET", "http://localhost"), - scheme: "http", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, match https, build https", - route: new(Route).Schemes("https", "ftp").Host("localhost"), - request: newRequest("GET", "https://localhost"), - scheme: "https", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp, build https", - route: new(Route).Schemes("https", "ftp").Host("localhost"), - request: newRequest("GET", "ftp://localhost"), - scheme: "https", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp, build ftp", - route: new(Route).Schemes("ftp", "https").Host("localhost"), - request: newRequest("GET", "ftp://localhost"), - scheme: "ftp", - host: "localhost", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp").Host("localhost"), - request: newRequest("GET", "http://localhost"), - scheme: "https", - host: "localhost", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m *RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - } -} - -func TestBuildVarsFunc(t *testing.T) { - tests := []routeTest{ - { - title: "BuildVarsFunc set on route", - route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "3" - vars["v2"] = "a" - return vars - }), - request: newRequest("GET", "http://localhost/111/2"), - path: "/111/3a", - pathTemplate: `/111/{v1:\d}{v2:.*}`, - shouldMatch: true, - }, - { - title: "BuildVarsFunc set on route and parent route", - route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "2" - return vars - }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v2"] = "b" - return vars - }), - request: newRequest("GET", "http://localhost/1/a"), - path: "/2/b", - pathTemplate: `/{v1:\d}/{v2:\w}`, - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - subrouter3 := new(Route).PathPrefix("/foo").Subrouter() - subrouter4 := 
new(Route).PathPrefix("/foo/bar").Subrouter() - subrouter5 := new(Route).PathPrefix("/{category}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - pathTemplate: `/{v2:[a-z]+}`, - hostTemplate: `{v1:[a-z]+}.google.com`, - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - pathTemplate: `/{v2:[a-z]+}`, - hostTemplate: `{v1:[a-z]+}.google.com`, - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - pathTemplate: `/foo/{v1}/baz/{v2}`, - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - pathTemplate: `/foo/{v1}/baz/{v2}`, - shouldMatch: false, - }, - { - route: subrouter3.Path("/"), - request: newRequest("GET", "http://localhost/foo/"), - vars: map[string]string{}, - host: "", - path: "/foo/", - pathTemplate: `/foo/`, - shouldMatch: true, - }, - { - route: subrouter3.Path(""), - request: newRequest("GET", "http://localhost/foo"), - vars: map[string]string{}, - host: "", - path: "/foo", - pathTemplate: `/foo`, - shouldMatch: true, - }, - - { - route: subrouter4.Path("/"), - request: newRequest("GET", "http://localhost/foo/bar/"), - vars: map[string]string{}, - host: "", - path: "/foo/bar/", - pathTemplate: `/foo/bar/`, - shouldMatch: true, - }, - { - route: subrouter4.Path(""), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{}, - host: "", - path: "/foo/bar", - pathTemplate: `/foo/bar`, - shouldMatch: true, - }, - { - route: subrouter5.Path("/"), - request: newRequest("GET", "http://localhost/baz/"), - vars: map[string]string{"category": "baz"}, - host: "", - path: "/baz/", - pathTemplate: `/{category}/`, - shouldMatch: true, - }, - { - route: subrouter5.Path(""), - request: newRequest("GET", "http://localhost/baz"), - vars: map[string]string{"category": "baz"}, - host: "", - path: "/baz", - pathTemplate: `/{category}`, - shouldMatch: true, - }, - { - title: "Build with scheme on parent router", - route: new(Route).Schemes("ftp").Host("google.com").Subrouter().Path("/"), - request: newRequest("GET", "ftp://google.com/"), - scheme: "ftp", - host: "google.com", - path: "/", - pathTemplate: `/`, - hostTemplate: `google.com`, - shouldMatch: true, - }, - { - title: "Prefer scheme on child route when building URLs", - route: new(Route).Schemes("https", "ftp").Host("google.com").Subrouter().Schemes("ftp").Path("/"), - request: newRequest("GET", "ftp://google.com/"), - scheme: "ftp", - host: "google.com", - path: "/", - pathTemplate: `/`, - hostTemplate: `google.com`, - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - 
r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - testUseEscapedRoute(t, test) - } -} - -func TestUseEncodedPath(t *testing.T) { - r := NewRouter() - r.UseEncodedPath() - - tests := []routeTest{ - { - title: "Router with useEncodedPath, URL with encoded slash does match", - route: r.NewRoute().Path("/v1/{v1}/v2"), - request: newRequest("GET", "http://localhost/v1/1%2F2/v2"), - vars: map[string]string{"v1": "1%2F2"}, - host: "", - path: "/v1/1%2F2/v2", - pathTemplate: `/v1/{v1}/v2`, - shouldMatch: true, - }, - { - title: "Router with useEncodedPath, URL with encoded slash doesn't match", - route: r.NewRoute().Path("/v1/1/2/v2"), - request: newRequest("GET", "http://localhost/v1/1%2F2/v2"), - vars: map[string]string{"v1": "1%2F2"}, - host: "", - path: "/v1/1%2F2/v2", - pathTemplate: `/v1/1/2/v2`, - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - testTemplate(t, test) - } -} - -func TestWalkSingleDepth(t *testing.T) { - r0 := NewRouter() - r1 := NewRouter() - r2 := NewRouter() - - r0.Path("/g") - r0.Path("/o") - r0.Path("/d").Handler(r1) - r0.Path("/r").Handler(r2) - r0.Path("/a") - - r1.Path("/z") - r1.Path("/i") - r1.Path("/l") - r1.Path("/l") - - r2.Path("/i") - r2.Path("/l") - r2.Path("/l") - - paths := []string{"g", "o", "r", "i", "l", "l", "a"} - depths := []int{0, 0, 0, 1, 1, 1, 0} - i := 0 - err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { - matcher := route.matchers[0].(*routeRegexp) - if matcher.template == "/d" { - return SkipRouter - } 
- if len(ancestors) != depths[i] { - t.Errorf(`Expected depth of %d at i = %d; got "%d"`, depths[i], i, len(ancestors)) - } - if matcher.template != "/"+paths[i] { - t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) - } - i++ - return nil - }) - if err != nil { - panic(err) - } - if i != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), i) - } -} - -func TestWalkNested(t *testing.T) { - router := NewRouter() - - g := router.Path("/g").Subrouter() - o := g.PathPrefix("/o").Subrouter() - r := o.PathPrefix("/r").Subrouter() - i := r.PathPrefix("/i").Subrouter() - l1 := i.PathPrefix("/l").Subrouter() - l2 := l1.PathPrefix("/l").Subrouter() - l2.Path("/a") - - testCases := []struct { - path string - ancestors []*Route - }{ - {"/g", []*Route{}}, - {"/g/o", []*Route{g.parent.(*Route)}}, - {"/g/o/r", []*Route{g.parent.(*Route), o.parent.(*Route)}}, - {"/g/o/r/i", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route)}}, - {"/g/o/r/i/l", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route)}}, - {"/g/o/r/i/l/l", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route), l1.parent.(*Route)}}, - {"/g/o/r/i/l/l/a", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route), l1.parent.(*Route), l2.parent.(*Route)}}, - } - - idx := 0 - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - path := testCases[idx].path - tpl := route.regexp.path.template - if tpl != path { - t.Errorf(`Expected %s got %s`, path, tpl) - } - currWantAncestors := testCases[idx].ancestors - if !reflect.DeepEqual(currWantAncestors, ancestors) { - t.Errorf(`Expected %+v got %+v`, currWantAncestors, ancestors) - } - idx++ - return nil - }) - if err != nil { - panic(err) - } - if idx != len(testCases) { - t.Errorf("Expected %d routes, found %d", len(testCases), idx) - } -} - -func TestWalkSubrouters(t *testing.T) { - router := NewRouter() - - g := router.Path("/g").Subrouter() - o := g.PathPrefix("/o").Subrouter() - o.Methods("GET") - o.Methods("PUT") - - // all 4 routes should be matched, but final 2 routes do not have path templates - paths := []string{"/g", "/g/o", "", ""} - idx := 0 - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - path := paths[idx] - tpl, _ := route.GetPathTemplate() - if tpl != path { - t.Errorf(`Expected %s got %s`, path, tpl) - } - idx++ - return nil - }) - if err != nil { - panic(err) - } - if idx != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), idx) - } -} - -func TestWalkErrorRoute(t *testing.T) { - router := NewRouter() - router.Path("/g") - expectedError := errors.New("error") - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - return expectedError - }) - if err != expectedError { - t.Errorf("Expected %v routes, found %v", expectedError, err) - } -} - -func TestWalkErrorMatcher(t *testing.T) { - router := NewRouter() - expectedError := router.Path("/g").Subrouter().Path("").GetError() - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - return route.GetError() - }) - if err != expectedError { - t.Errorf("Expected %v routes, found %v", expectedError, err) - } -} - -func TestWalkErrorHandler(t *testing.T) { - handler := NewRouter() - expectedError := handler.Path("/path").Subrouter().Path("").GetError() - router := NewRouter() - router.Path("/g").Handler(handler) - err := router.Walk(func(route *Route, router 
*Router, ancestors []*Route) error { - return route.GetError() - }) - if err != expectedError { - t.Errorf("Expected %v routes, found %v", expectedError, err) - } -} - -func TestSubrouterErrorHandling(t *testing.T) { - superRouterCalled := false - subRouterCalled := false - - router := NewRouter() - router.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - superRouterCalled = true - }) - subRouter := router.PathPrefix("/bign8").Subrouter() - subRouter.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - subRouterCalled = true - }) - - req, _ := http.NewRequest("GET", "http://localhost/bign8/was/here", nil) - router.ServeHTTP(NewRecorder(), req) - - if superRouterCalled { - t.Error("Super router 404 handler called when sub-router 404 handler is available.") - } - if !subRouterCalled { - t.Error("Sub-router 404 handler was not called.") - } -} - -// See: https://github.com/gorilla/mux/issues/200 -func TestPanicOnCapturingGroups(t *testing.T) { - defer func() { - if recover() == nil { - t.Errorf("(Test that capturing groups now fail fast) Expected panic, however test completed successfully.\n") - } - }() - NewRouter().NewRoute().Path("/{type:(promo|special)}/{promoId}.json") -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, err := route.GetHostTemplate() - if err != nil { - host = "none" - } - path, err := route.GetPathTemplate() - if err != nil { - path = "none" - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - query := test.query - shouldRedirect := test.shouldRedirect - uri := url.URL{ - Scheme: test.scheme, - Host: test.host, - Path: test.path, - } - if uri.Scheme == "" { - uri.Scheme = "http" - } - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if vars != nil && !stringMapEqual(vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if test.scheme != "" { - u, err := route.URL(mapToPairs(match.Vars)...) - if err != nil { - t.Fatalf("(%v) URL error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if uri.Scheme != u.Scheme { - t.Errorf("(%v) URLScheme not equal: expected %v, got %v", test.title, uri.Scheme, u.Scheme) - return - } - } - if test.host != "" { - u, err := test.route.URLHost(mapToPairs(match.Vars)...) - if err != nil { - t.Fatalf("(%v) URLHost error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if uri.Scheme != u.Scheme { - t.Errorf("(%v) URLHost scheme not equal: expected %v, got %v -- %v", test.title, uri.Scheme, u.Scheme, getRouteTemplate(route)) - return - } - if uri.Host != u.Host { - t.Errorf("(%v) URLHost host not equal: expected %v, got %v -- %v", test.title, uri.Host, u.Host, getRouteTemplate(route)) - return - } - } - if test.path != "" { - u, err := route.URLPath(mapToPairs(match.Vars)...) 
- if err != nil { - t.Fatalf("(%v) URLPath error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if uri.Path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, uri.Path, u.Path, getRouteTemplate(route)) - return - } - } - if test.host != "" && test.path != "" { - u, err := route.URL(mapToPairs(match.Vars)...) - if err != nil { - t.Fatalf("(%v) URL error: %v -- %v", test.title, err, getRouteTemplate(route)) - } - if expected, got := uri.String(), u.String(); expected != got { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, expected, got, getRouteTemplate(route)) - return - } - } - if query != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) - if query != u.RawQuery { - t.Errorf("(%v) URL query not equal: expected %v, got %v", test.title, query, u.RawQuery) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -func testUseEscapedRoute(t *testing.T, test routeTest) { - test.route.useEncodedPath = true - testRoute(t, test) -} - -func testTemplate(t *testing.T, test routeTest) { - route := test.route - pathTemplate := test.pathTemplate - if len(pathTemplate) == 0 { - pathTemplate = test.path - } - hostTemplate := test.hostTemplate - if len(hostTemplate) == 0 { - hostTemplate = test.host - } - - routePathTemplate, pathErr := route.GetPathTemplate() - if pathErr == nil && routePathTemplate != pathTemplate { - t.Errorf("(%v) GetPathTemplate not equal: expected %v, got %v", test.title, pathTemplate, routePathTemplate) - } - - routeHostTemplate, hostErr := route.GetHostTemplate() - if hostErr == nil && routeHostTemplate != hostTemplate { - t.Errorf("(%v) GetHostTemplate not equal: expected %v, got %v", test.title, hostTemplate, routeHostTemplate) - } -} - -func testMethods(t *testing.T, test routeTest) { - route := test.route - methods, _ := route.GetMethods() - if strings.Join(methods, ",") != strings.Join(test.methods, ",") { - t.Errorf("(%v) GetMethods not equal: expected %v, got %v", test.title, test.methods, methods) - } -} - -func testRegexp(t *testing.T, test routeTest) { - route := test.route - routePathRegexp, regexpErr := route.GetPathRegexp() - if test.pathRegexp != "" && regexpErr == nil && routePathRegexp != test.pathRegexp { - t.Errorf("(%v) GetPathRegexp not equal: expected %v, got %v", test.title, test.pathRegexp, routePathRegexp) - } -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -func TestSkipClean(t *testing.T) { - func1 := func(w http.ResponseWriter, r 
*http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.SkipClean(true) - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - res := NewRecorder() - r.ServeHTTP(res, req) - - if len(res.HeaderMap["Location"]) != 0 { - t.Errorf("Shouldn't redirect since skip clean is disabled") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true -} - -// newRequest is a helper function to create a new request with a method and url. -// The request returned is a 'server' request as opposed to a 'client' one through -// simulated write onto the wire and read off of the wire. -// The differences between requests are detailed in the net/http package. 
-func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - // extract the escaped original host+path from url - // http://localhost/path/here?v=1#frag -> //localhost/path/here - opaque := "" - if i := len(req.URL.Scheme); i > 0 { - opaque = url[i+1:] - } - - if i := strings.LastIndex(opaque, "?"); i > -1 { - opaque = opaque[:i] - } - if i := strings.LastIndex(opaque, "#"); i > -1 { - opaque = opaque[:i] - } - - // Escaped host+path workaround as detailed in https://golang.org/pkg/net/url/#URL - // for < 1.5 client side workaround - req.URL.Opaque = opaque - - // Simulate writing to wire - var buff bytes.Buffer - req.Write(&buff) - ioreader := bufio.NewReader(&buff) - - // Parse request off of 'wire' - req, err = http.ReadRequest(ioreader) - if err != nil { - panic(err) - } - return req -} - -func TestNoMatchMethodErrorHandler(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Methods("GET", "POST") - - req, _ := http.NewRequest("PUT", "http://localhost/", nil) - match := new(RouteMatch) - matched := r.Match(req, match) - - if matched { - t.Error("Should not have matched route for methods") - } - - if match.MatchErr != ErrMethodMismatch { - t.Error("Should get ErrMethodMismatch error") - } - - resp := NewRecorder() - r.ServeHTTP(resp, req) - if resp.Code != 405 { - t.Errorf("Expecting code %v", 405) - } - - // Add matching route - r.HandleFunc("/", func1).Methods("PUT") - - match = new(RouteMatch) - matched = r.Match(req, match) - - if !matched { - t.Error("Should have matched route for methods") - } - - if match.MatchErr != nil { - t.Error("Should not have any matching error. Found:", match.MatchErr) - } -} diff --git a/vendor/github.com/gorilla/mux/old_test.go b/vendor/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 3751e472..00000000 --- a/vendor/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,704 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. 
-func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). - PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. - scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. - scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - t.Errorf("Expected: %v\nGot: %v\nRequest: %v %v", shouldMatch, matched, request.Method, url) - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. 
- reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = 
[]urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - 
request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.example.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.example.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.example.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtURL := u.String() - // Yay, subroute aware of the domain when building! 
- if builtURL != url { - t.Errorf("Expected %q, got %q.", url, builtURL) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. -func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false, false, false) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/vendor/github.com/justinas/alice/chain_test.go b/vendor/github.com/justinas/alice/chain_test.go deleted file mode 100644 index 97e0ff11..00000000 --- a/vendor/github.com/justinas/alice/chain_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Package alice implements a middleware chaining solution. 
-package alice - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" -) - -// A constructor for middleware -// that writes its own "tag" into the RW and does nothing else. -// Useful in checking if a chain is behaving in the right order. -func tagMiddleware(tag string) Constructor { - return func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(tag)) - h.ServeHTTP(w, r) - }) - } -} - -// Not recommended (https://golang.org/pkg/reflect/#Value.Pointer), -// but the best we can do. -func funcsEqual(f1, f2 interface{}) bool { - val1 := reflect.ValueOf(f1) - val2 := reflect.ValueOf(f2) - return val1.Pointer() == val2.Pointer() -} - -var testApp = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("app\n")) -}) - -// Tests creating a new chain -func TestNew(t *testing.T) { - c1 := func(h http.Handler) http.Handler { - return nil - } - c2 := func(h http.Handler) http.Handler { - return http.StripPrefix("potato", nil) - } - - slice := []Constructor{c1, c2} - - chain := New(slice...) - assert.True(t, funcsEqual(chain.constructors[0], slice[0])) - assert.True(t, funcsEqual(chain.constructors[1], slice[1])) -} - -func TestThenWorksWithNoMiddleware(t *testing.T) { - assert.NotPanics(t, func() { - chain := New() - final := chain.Then(testApp) - - assert.True(t, funcsEqual(final, testApp)) - }) -} - -func TestThenTreatsNilAsDefaultServeMux(t *testing.T) { - chained := New().Then(nil) - assert.Equal(t, chained, http.DefaultServeMux) -} - -func TestThenFuncTreatsNilAsDefaultServeMux(t *testing.T) { - chained := New().ThenFunc(nil) - assert.Equal(t, chained, http.DefaultServeMux) -} - -func TestThenFuncConstructsHandlerFunc(t *testing.T) { - fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - }) - chained := New().ThenFunc(fn) - rec := httptest.NewRecorder() - chained.ServeHTTP(rec, (*http.Request)(nil)) - assert.Equal(t, 200, rec.Code) -} - -func TestThenOrdersHandlersRight(t *testing.T) { - t1 := tagMiddleware("t1\n") - t2 := tagMiddleware("t2\n") - t3 := tagMiddleware("t3\n") - - chained := New(t1, t2, t3).Then(testApp) - - w := httptest.NewRecorder() - r, err := http.NewRequest("GET", "/", nil) - if err != nil { - t.Fatal(err) - } - - chained.ServeHTTP(w, r) - - assert.Equal(t, w.Body.String(), "t1\nt2\nt3\napp\n") -} - -func TestAppendAddsHandlersCorrectly(t *testing.T) { - chain := New(tagMiddleware("t1\n"), tagMiddleware("t2\n")) - newChain := chain.Append(tagMiddleware("t3\n"), tagMiddleware("t4\n")) - - assert.Equal(t, len(chain.constructors), 2) - assert.Equal(t, len(newChain.constructors), 4) - - chained := newChain.Then(testApp) - - w := httptest.NewRecorder() - r, err := http.NewRequest("GET", "/", nil) - if err != nil { - t.Fatal(err) - } - - chained.ServeHTTP(w, r) - - assert.Equal(t, w.Body.String(), "t1\nt2\nt3\nt4\napp\n") -} - -func TestAppendRespectsImmutability(t *testing.T) { - chain := New(tagMiddleware("")) - newChain := chain.Append(tagMiddleware("")) - - assert.NotEqual(t, &chain.constructors[0], &newChain.constructors[0]) -} - -func TestExtendAddsHandlersCorrectly(t *testing.T) { - chain1 := New(tagMiddleware("t1\n"), tagMiddleware("t2\n")) - chain2 := New(tagMiddleware("t3\n"), tagMiddleware("t4\n")) - newChain := chain1.Extend(chain2) - - assert.Equal(t, len(chain1.constructors), 2) - assert.Equal(t, len(chain2.constructors), 2) - assert.Equal(t, len(newChain.constructors), 4) - - 
chained := newChain.Then(testApp) - - w := httptest.NewRecorder() - r, err := http.NewRequest("GET", "/", nil) - if err != nil { - t.Fatal(err) - } - - chained.ServeHTTP(w, r) - - assert.Equal(t, w.Body.String(), "t1\nt2\nt3\nt4\napp\n") -} - -func TestExtendRespectsImmutability(t *testing.T) { - chain := New(tagMiddleware("")) - newChain := chain.Extend(New(tagMiddleware(""))) - - assert.NotEqual(t, &chain.constructors[0], &newChain.constructors[0]) -} diff --git a/vendor/github.com/linksmart/go-sec/LICENSE b/vendor/github.com/linksmart/go-sec/LICENSE new file mode 100644 index 00000000..c9b36afd --- /dev/null +++ b/vendor/github.com/linksmart/go-sec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Fraunhofer Institute for Applied Information Technology FIT + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/linksmart/go-sec/NOTICE b/vendor/github.com/linksmart/go-sec/NOTICE new file mode 100644 index 00000000..33537626 --- /dev/null +++ b/vendor/github.com/linksmart/go-sec/NOTICE @@ -0,0 +1,10 @@ +Go Security Package +Copyright 2014 Fraunhofer Institute for Applied Information Technology FIT + +This product includes software developed at +Fraunhofer Institute for Applied Information Technology FIT (http://fit.fraunhofer.de/). + +This repository includes the following dependencies developed by +the corresponding developers and organisations: + +* https://github.com/dgrijalva/jwt-go by Dave Grijalva (MIT License) diff --git a/vendor/code.linksmart.eu/com/go-sec/auth/keycloak/obtainer/obtainer.go b/vendor/github.com/linksmart/go-sec/auth/keycloak/obtainer/obtainer.go similarity index 97% rename from vendor/code.linksmart.eu/com/go-sec/auth/keycloak/obtainer/obtainer.go rename to vendor/github.com/linksmart/go-sec/auth/keycloak/obtainer/obtainer.go index af40cfab..90bb05ed 100644 --- a/vendor/code.linksmart.eu/com/go-sec/auth/keycloak/obtainer/obtainer.go +++ b/vendor/github.com/linksmart/go-sec/auth/keycloak/obtainer/obtainer.go @@ -15,7 +15,7 @@ import ( "strings" "time" - "code.linksmart.eu/com/go-sec/auth/obtainer" + "github.com/linksmart/go-sec/auth/obtainer" ) const ( @@ -52,6 +52,7 @@ func (o *KeycloakObtainer) Login(serverAddr, username, password, clientID string "client_id": {clientID}, "username": {username}, "password": {password}, + "scope": {"openid"}, }) if err != nil { return "", err diff --git a/vendor/code.linksmart.eu/com/go-sec/auth/keycloak/validator/validator.go b/vendor/github.com/linksmart/go-sec/auth/keycloak/validator/validator.go similarity index 98% rename from vendor/code.linksmart.eu/com/go-sec/auth/keycloak/validator/validator.go rename to vendor/github.com/linksmart/go-sec/auth/keycloak/validator/validator.go index 04080443..c73ee6c6 100644 --- a/vendor/code.linksmart.eu/com/go-sec/auth/keycloak/validator/validator.go +++ b/vendor/github.com/linksmart/go-sec/auth/keycloak/validator/validator.go @@ -3,19 +3,18 @@ package validator import ( - "fmt" - "log" - "os" - "strconv" - "crypto/rsa" "crypto/x509" "encoding/base64" "encoding/json" + "fmt" + "log" "net/http" + "os" + "strconv" jwt "github.com/dgrijalva/jwt-go" - 
"code.linksmart.eu/com/go-sec/auth/validator" + "github.com/linksmart/go-sec/auth/validator" ) const DriverName = "keycloak" diff --git a/vendor/code.linksmart.eu/com/go-sec/auth/obtainer/client.go b/vendor/github.com/linksmart/go-sec/auth/obtainer/client.go similarity index 100% rename from vendor/code.linksmart.eu/com/go-sec/auth/obtainer/client.go rename to vendor/github.com/linksmart/go-sec/auth/obtainer/client.go diff --git a/vendor/code.linksmart.eu/com/go-sec/auth/obtainer/obtainer.go b/vendor/github.com/linksmart/go-sec/auth/obtainer/obtainer.go similarity index 100% rename from vendor/code.linksmart.eu/com/go-sec/auth/obtainer/obtainer.go rename to vendor/github.com/linksmart/go-sec/auth/obtainer/obtainer.go diff --git a/vendor/code.linksmart.eu/com/go-sec/auth/validator/handler.go b/vendor/github.com/linksmart/go-sec/auth/validator/handler.go similarity index 98% rename from vendor/code.linksmart.eu/com/go-sec/auth/validator/handler.go rename to vendor/github.com/linksmart/go-sec/auth/validator/handler.go index 1e8f8706..e025ebee 100644 --- a/vendor/code.linksmart.eu/com/go-sec/auth/validator/handler.go +++ b/vendor/github.com/linksmart/go-sec/auth/validator/handler.go @@ -9,8 +9,8 @@ import ( "net/http" "strings" - _ "code.linksmart.eu/com/go-sec/auth/keycloak/obtainer" - "code.linksmart.eu/com/go-sec/auth/obtainer" + _ "github.com/linksmart/go-sec/auth/keycloak/obtainer" + "github.com/linksmart/go-sec/auth/obtainer" ) // Handler is a http.Handler that validates tickets and performs optional authorization diff --git a/vendor/code.linksmart.eu/com/go-sec/auth/validator/validator.go b/vendor/github.com/linksmart/go-sec/auth/validator/validator.go similarity index 98% rename from vendor/code.linksmart.eu/com/go-sec/auth/validator/validator.go rename to vendor/github.com/linksmart/go-sec/auth/validator/validator.go index f8bd227a..3178a571 100644 --- a/vendor/code.linksmart.eu/com/go-sec/auth/validator/validator.go +++ b/vendor/github.com/linksmart/go-sec/auth/validator/validator.go @@ -9,7 +9,7 @@ import ( "strconv" "sync" - "code.linksmart.eu/com/go-sec/authz" + "github.com/linksmart/go-sec/authz" ) // Interface methods to validate Service Ticket diff --git a/vendor/code.linksmart.eu/com/go-sec/authz/authz.go b/vendor/github.com/linksmart/go-sec/authz/authz.go similarity index 100% rename from vendor/code.linksmart.eu/com/go-sec/authz/authz.go rename to vendor/github.com/linksmart/go-sec/authz/authz.go diff --git a/vendor/code.linksmart.eu/com/go-sec/authz/config.go b/vendor/github.com/linksmart/go-sec/authz/config.go similarity index 100% rename from vendor/code.linksmart.eu/com/go-sec/authz/config.go rename to vendor/github.com/linksmart/go-sec/authz/config.go diff --git a/vendor/code.linksmart.eu/com/go-sec/LICENSE b/vendor/github.com/linksmart/service-catalog/LICENSE similarity index 100% rename from vendor/code.linksmart.eu/com/go-sec/LICENSE rename to vendor/github.com/linksmart/service-catalog/LICENSE diff --git a/vendor/code.linksmart.eu/com/go-sec/NOTICE b/vendor/github.com/linksmart/service-catalog/NOTICE similarity index 100% rename from vendor/code.linksmart.eu/com/go-sec/NOTICE rename to vendor/github.com/linksmart/service-catalog/NOTICE diff --git a/vendor/code.linksmart.eu/sc/service-catalog/discovery/README.md b/vendor/github.com/linksmart/service-catalog/discovery/README.md similarity index 100% rename from vendor/code.linksmart.eu/sc/service-catalog/discovery/README.md rename to vendor/github.com/linksmart/service-catalog/discovery/README.md diff --git 
a/vendor/code.linksmart.eu/sc/service-catalog/discovery/catalog.go b/vendor/github.com/linksmart/service-catalog/discovery/catalog.go similarity index 100% rename from vendor/code.linksmart.eu/sc/service-catalog/discovery/catalog.go rename to vendor/github.com/linksmart/service-catalog/discovery/catalog.go diff --git a/vendor/github.com/linksmart/service-catalog/discovery/init.go b/vendor/github.com/linksmart/service-catalog/discovery/init.go new file mode 100644 index 00000000..227c07fc --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/discovery/init.go @@ -0,0 +1,19 @@ +// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT + +// Package discovery contains utility functions, which help to implement +// various use-cases of executing some logic as a result of DNS-SD service +// lookup +package discovery + +import ( + "github.com/farshidtz/elog" +) + +var logger *elog.Logger + +func init() { + logger = elog.New("[discovery] ", &elog.Config{ + DebugPrefix: "[discovery-debug] ", + DebugTrace: elog.NoTrace, + }) +} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/discovery/utils.go b/vendor/github.com/linksmart/service-catalog/discovery/utils.go similarity index 87% rename from vendor/code.linksmart.eu/sc/service-catalog/discovery/utils.go rename to vendor/github.com/linksmart/service-catalog/discovery/utils.go index 932c12f7..8aa42630 100644 --- a/vendor/code.linksmart.eu/sc/service-catalog/discovery/utils.go +++ b/vendor/github.com/linksmart/service-catalog/discovery/utils.go @@ -1,8 +1,5 @@ // Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT -// Package discovery contains utility functions, which help to implement -// various use-cases of executing some logic as a result of DNS-SD service -// lookup package discovery import ( diff --git a/vendor/code.linksmart.eu/sc/service-catalog/LICENSE b/vendor/github.com/linksmart/service-catalog/v2/LICENSE similarity index 100% rename from vendor/code.linksmart.eu/sc/service-catalog/LICENSE rename to vendor/github.com/linksmart/service-catalog/v2/LICENSE diff --git a/vendor/code.linksmart.eu/sc/service-catalog/NOTICE b/vendor/github.com/linksmart/service-catalog/v2/NOTICE similarity index 100% rename from vendor/code.linksmart.eu/sc/service-catalog/NOTICE rename to vendor/github.com/linksmart/service-catalog/v2/NOTICE diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/catalog.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/catalog.go new file mode 100644 index 00000000..c5f81b0d --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/catalog.go @@ -0,0 +1,121 @@ +// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT + +package catalog + +import ( + "fmt" + "mime" + "net/url" + "strings" + "time" +) + +// Structs + +// Service is a service entry in the catalog +type Service struct { + ID string `json:"id"` + Description string `json:"description"` + Name string `json:"name"` + APIs map[string]string `json:"apis"` + Docs []Doc `json:"docs"` + Meta map[string]interface{} `json:"meta"` + TTL uint `json:"ttl,omitempty"` + Created time.Time `json:"created"` + Updated time.Time `json:"updated"` + // Expires is the time when service will be removed from the system (Only when TTL is set) + Expires time.Time `json:"expires,omitempty"` +} + +// API is representation of service's API +type API struct { + Protocol string `json:"protocol"` + URL string `json:"url"` +} + +// Doc is an external resource 
documenting the service and/or APIs. E.g. OpenAPI specs, Wiki page
+type Doc struct {
+ Description string `json:"description"`
+ Type string `json:"type"`
+ URL string `json:"url"`
+ APIs []string `json:"apis"`
+}
+
+// Validates the Service configuration
+func (s Service) validate() error {
+
+ if strings.ContainsAny(s.ID, " ") {
+ return fmt.Errorf("service id must not contain spaces")
+ }
+ _, err := url.Parse("http://example.com/" + s.ID)
+ if err != nil {
+ return fmt.Errorf("service id is not valid: %v", err)
+ }
+
+ if s.Name == "" {
+ return fmt.Errorf("service name not defined")
+ }
+ if strings.ContainsAny(s.Name, " ") {
+ return fmt.Errorf("service name must not contain spaces")
+ }
+
+ if s.TTL == 0 || s.TTL > MaxServiceTTL {
+ return fmt.Errorf("service TTL should be between 1 and 86400 (i.e. 1 second to one day)")
+ }
+ for _, URL := range s.APIs {
+ if _, err := url.Parse(URL); err != nil {
+ return fmt.Errorf("invalid service API url: %s", URL)
+ }
+ }
+
+ for _, doc := range s.Docs {
+ // if doc.Type == "" {
+ // return fmt.Errorf("doc type not defined")
+ // }
+ if _, _, err := mime.ParseMediaType(doc.Type); err != nil {
+ return fmt.Errorf("invalid service doc MIME type: %s: %s", doc.URL, err)
+ }
+ if _, err := url.Parse(doc.URL); err != nil {
+ return fmt.Errorf("invalid service doc url: %s", doc.URL)
+ }
+ // if len(s.APIs) != 0 {
+ // for _, api := range doc.APIs {
+ // if _, found := s.APIs[api]; !found {
+ // return fmt.Errorf("service API name in doc does not match any apis: %s", api)
+ // }
+ // }
+ // }
+ }
+
+ return nil
+}
+
+// Error describes an API error (serializable in JSON)
+type Error struct {
+ // Code is the (http) code of the error
+ Code int `json:"code"`
+ // Message is the (human-readable) error message
+ Message string `json:"message"`
+}
+
+// Interfaces
+
+// Storage interface
+type Storage interface {
+ add(s *Service) error
+ get(id string) (*Service, error)
+ update(id string, s *Service) error
+ delete(id string) error
+ list(page, perPage int) ([]Service, int, error)
+ total() (int, error)
+ iterator() <-chan *Service
+ Close() error
+}
+
+// Listener interface can be used for notification of the catalog updates
+// NOTE: Implementations are expected to be thread safe
+type Listener interface {
+ added(s Service)
+ updated(s Service)
+ deleted(s Service)
+}
diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/constants.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/constants.go
new file mode 100644
index 00000000..567d5fe2
--- /dev/null
+++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/constants.go
@@ -0,0 +1,22 @@
+// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT
+
+package catalog
+
+const (
+ DNSSDServiceType = "_linksmart-sc._tcp"
+ MaxPerPage = 100
+ LoggerPrefix = "[sc] "
+
+ CatalogBackendMemory = "memory"
+ CatalogBackendLevelDB = "leveldb"
+
+ APITypeHTTP = "HTTP"
+ APITypeMQTT = "MQTT"
+
+ MaxServiceTTL = 24 * 60 * 60 //in seconds
+)
+
+var SupportedBackends = map[string]bool{
+ CatalogBackendMemory: true,
+ CatalogBackendLevelDB: true,
+}
diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/controller.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/controller.go
new file mode 100644
index 00000000..bb66d237
--- /dev/null
+++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/controller.go
@@ -0,0 +1,220 @@
+// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT
+
+package catalog
+
+import
( + "fmt" + "strings" + "sync" + "time" + + "github.com/linksmart/service-catalog/v2/utils" + uuid "github.com/satori/go.uuid" +) + +var ControllerExpiryCleanupInterval = 60 * time.Second // to be modified in unit tests + +type Controller struct { + wg sync.WaitGroup + sync.RWMutex + storage Storage + listeners []Listener +} + +func NewController(storage Storage, listeners ...Listener) (*Controller, error) { + c := Controller{ + storage: storage, + listeners: listeners, + } + + go c.cleanExpired() + + return &c, nil +} + +func (c *Controller) add(s Service) (*Service, error) { + if err := s.validate(); err != nil { + return nil, &BadRequestError{err.Error()} + } + + c.Lock() + defer c.Unlock() + + if s.ID == "" { + // System generated id + s.ID = uuid.NewV4().String() + } + s.Created = time.Now().UTC() + s.Updated = s.Created + + s.Expires = s.Created.Add(time.Duration(s.TTL) * time.Second) + + err := c.storage.add(&s) + if err != nil { + return nil, err + } + + // notify listeners + for _, l := range c.listeners { + go l.added(s) + } + + return &s, nil +} + +func (c *Controller) get(id string) (*Service, error) { + return c.storage.get(id) +} + +func (c *Controller) update(id string, s Service) (*Service, error) { + if err := s.validate(); err != nil { + return nil, &BadRequestError{err.Error()} + } + + c.Lock() + defer c.Unlock() + + // Get the stored service + ss, err := c.storage.get(id) + if err != nil { + return nil, err + } + + ss.Description = s.Description + ss.Name = s.Name + ss.APIs = s.APIs + ss.Docs = s.Docs + ss.Meta = s.Meta + ss.TTL = s.TTL + ss.Updated = time.Now().UTC() + + ss.Expires = ss.Updated.Add(time.Duration(ss.TTL) * time.Second) + + err = c.storage.update(id, ss) + if err != nil { + return nil, err + } + + // notify listeners + for _, l := range c.listeners { + go l.updated(s) + } + + return ss, nil +} + +func (c *Controller) delete(id string) error { + c.Lock() + defer c.Unlock() + + old, err := c.storage.get(id) + if err != nil { + return err + } + + err = c.storage.delete(id) + if err != nil { + return err + } + + // notify listeners + for _, l := range c.listeners { + go l.deleted(*old) + } + + return nil +} + +func (c *Controller) list(page, perPage int) ([]Service, int, error) { + return c.storage.list(page, perPage) +} + +func (c *Controller) filter(path, op, value string, page, perPage int) ([]Service, int, error) { + c.RLock() + defer c.RUnlock() + + matches := make([]Service, 0) + pp := MaxPerPage + for p := 1; ; p++ { + services, t, err := c.storage.list(p, pp) + if err != nil { + return nil, 0, err + } + + for i := range services { + matched, err := utils.MatchObject(services[i], strings.Split(path, "."), op, value) + if err != nil { + return nil, 0, err + } + if matched { + matches = append(matches, services[i]) + } + } + + if p*pp >= t { + break + } + } + // Pagination + offset, limit, err := utils.GetPagingAttr(len(matches), page, perPage, MaxPerPage) + if err != nil { + return nil, 0, &BadRequestError{fmt.Sprintf("Unable to paginate: %s", err)} + } + // Return the page + return matches[offset : offset+limit], len(matches), nil +} + +func (c *Controller) total() (int, error) { + return c.storage.total() +} + +func (c *Controller) cleanExpired() { + for t := time.Now(); true; t = <-time.Tick(ControllerExpiryCleanupInterval) { + c.Lock() + var expiredServices []*Service + + for s := range c.storage.iterator() { + // remove if expiry is overdue by half-TTL + if t.After(s.Expires.Add(time.Duration(s.TTL/2) * time.Second)) { + expiredServices = 
append(expiredServices, s) + } + } + + for i := range expiredServices { + logger.Printf("cleanExpired() Removing expired registration: %s", expiredServices[i].ID) + err := c.storage.delete(expiredServices[i].ID) + if err != nil { + logger.Printf("cleanExpired() Error removing expired registration: %s: %s", expiredServices[i].ID, err) + continue + } + // notify listeners + for li := range c.listeners { + go c.listeners[li].deleted(*expiredServices[i]) + } + } + c.Unlock() + } +} + +func (c *Controller) AddListener(listener Listener) { + c.Lock() + c.listeners = append(c.listeners, listener) + c.Unlock() +} + +func (c *Controller) RemoveListener(listener Listener) { + c.Lock() + for i, l := range c.listeners { + if l == listener { + //delete the entry and break + c.listeners = append(c.listeners[:i], c.listeners[i+1:]...) + break + } + } + c.Unlock() +} + +// Stop the controller +func (c *Controller) Stop() error { + return c.storage.Close() +} diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/errors.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/errors.go new file mode 100644 index 00000000..b529c5ea --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/errors.go @@ -0,0 +1,18 @@ +// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT + +package catalog + +// Not Found +type NotFoundError struct{ Msg string } + +func (e *NotFoundError) Error() string { return e.Msg } + +// Conflict (non-unique id, assignment to read-only data) +type ConflictError struct{ Msg string } + +func (e *ConflictError) Error() string { return e.Msg } + +// Bad Request +type BadRequestError struct{ Msg string } + +func (e *BadRequestError) Error() string { return e.Msg } diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/http.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/http.go new file mode 100644 index 00000000..2c2ed92c --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/http.go @@ -0,0 +1,244 @@ +// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT + +package catalog + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/gorilla/mux" + "github.com/linksmart/service-catalog/v2/utils" +) + +type HttpAPI struct { + controller *Controller + id string + description string + version string +} + +// NewHTTPAPI creates a RESTful HTTP API +func NewHTTPAPI(controller *Controller, id, description, version string) *HttpAPI { + return &HttpAPI{ + controller: controller, + id: id, + description: description, + version: version, + } +} + +// Collection is the paginated list of services +type Collection struct { + ID string `json:"id"` + Description string `json:"description"` + Services []Service `json:"services"` + Page int `json:"page"` + PerPage int `json:"per_page"` + Total int `json:"total"` +} + +// API Index: Lists services +func (a *HttpAPI) List(w http.ResponseWriter, req *http.Request) { + err := req.ParseForm() + if err != nil { + a.ErrorResponse(w, http.StatusBadRequest, "Error parsing the query:", err.Error()) + return + } + page, perPage, err := utils.ParsePagingParams( + req.Form.Get(utils.GetParamPage), req.Form.Get(utils.GetParamPerPage), MaxPerPage) + if err != nil { + a.ErrorResponse(w, http.StatusBadRequest, "Error parsing query parameters:", err.Error()) + return + } + + services, total, err := a.controller.list(page, perPage) + if err != nil { + a.ErrorResponse(w, http.StatusInternalServerError, err.Error()) + return + } 
+ + coll := &Collection{ + ID: a.id, + Description: a.description, + Services: services, + Page: page, + PerPage: perPage, + Total: total, + } + + w.Header().Set("Content-Type", "application/json;version="+a.version) + json.NewEncoder(w).Encode(coll) +} + +// Filters services +func (a *HttpAPI) Filter(w http.ResponseWriter, req *http.Request) { + params := mux.Vars(req) + path := params["path"] + op := params["op"] + value := params["value"] + + err := req.ParseForm() + if err != nil { + a.ErrorResponse(w, http.StatusBadRequest, "Error parsing the query:", err.Error()) + return + } + page, perPage, err := utils.ParsePagingParams( + req.Form.Get(utils.GetParamPage), req.Form.Get(utils.GetParamPerPage), MaxPerPage) + if err != nil { + a.ErrorResponse(w, http.StatusBadRequest, "Error parsing query parameters:", err.Error()) + return + } + + services, total, err := a.controller.filter(path, op, value, page, perPage) + if err != nil { + a.ErrorResponse(w, http.StatusInternalServerError, err.Error()) + return + } + + coll := &Collection{ + ID: a.id, + Description: a.description, + Services: services, + Page: page, + PerPage: perPage, + Total: total, + } + + w.Header().Set("Content-Type", "application/json;version="+a.version) + json.NewEncoder(w).Encode(coll) +} + +// Retrieves a service +func (a *HttpAPI) Get(w http.ResponseWriter, req *http.Request) { + params := mux.Vars(req) + + s, err := a.controller.get(params["id"]) + if err != nil { + switch err.(type) { + case *NotFoundError: + a.ErrorResponse(w, http.StatusNotFound, err.Error()) + return + default: + a.ErrorResponse(w, http.StatusInternalServerError, "Error retrieving the service:", err.Error()) + return + } + } + + w.Header().Set("Content-Type", "application/json;version="+a.version) + json.NewEncoder(w).Encode(s) +} + +func (a *HttpAPI) createService(w http.ResponseWriter, s *Service) { + addedS, err := a.controller.add(*s) + if err != nil { + switch err.(type) { + case *ConflictError: + a.ErrorResponse(w, http.StatusConflict, "Error creating the registration:", err.Error()) + return + case *BadRequestError: + a.ErrorResponse(w, http.StatusBadRequest, "Invalid service registration:", err.Error()) + return + default: + a.ErrorResponse(w, http.StatusInternalServerError, "Error creating the registration:", err.Error()) + return + } + } + + w.Header().Set("Content-Type", "application/json;version="+a.version) + w.Header().Set("Location", fmt.Sprintf("/%s", addedS.ID)) + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(addedS) +} + +// Adds a service +func (a *HttpAPI) Post(w http.ResponseWriter, req *http.Request) { + var s Service + err := json.NewDecoder(req.Body).Decode(&s) + if err != nil { + a.ErrorResponse(w, http.StatusBadRequest, "Error processing the request:", err.Error()) + return + } + + if s.ID != "" { + a.ErrorResponse(w, http.StatusBadRequest, "Creating a service with defined ID is not possible using a POST request.") + return + } + + a.createService(w, &s) + return +} + +// Updates an existing service (Response: StatusOK) +// or creates a new one with the given id (Response: StatusCreated) +func (a *HttpAPI) Put(w http.ResponseWriter, req *http.Request) { + params := mux.Vars(req) + + var s Service + err := json.NewDecoder(req.Body).Decode(&s) + if err != nil { + a.ErrorResponse(w, http.StatusBadRequest, "Error processing the request:", err.Error()) + return + } + + updatedS, err := a.controller.update(params["id"], s) + if err != nil { + switch err.(type) { + case *NotFoundError: + // Create a new service 
with the given id + s.ID = params["id"] + a.createService(w, &s) + return + case *ConflictError: + a.ErrorResponse(w, http.StatusConflict, "Error updating the service:", err.Error()) + return + case *BadRequestError: + a.ErrorResponse(w, http.StatusBadRequest, "Invalid service registration:", err.Error()) + return + default: + a.ErrorResponse(w, http.StatusInternalServerError, "Error updating the service:", err.Error()) + return + } + } + + w.Header().Set("Content-Type", "application/json;version="+a.version) + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(updatedS) +} + +// Deletes a service +func (a *HttpAPI) Delete(w http.ResponseWriter, req *http.Request) { + params := mux.Vars(req) + + err := a.controller.delete(params["id"]) + if err != nil { + switch err.(type) { + case *NotFoundError: + a.ErrorResponse(w, http.StatusNotFound, err.Error()) + return + default: + a.ErrorResponse(w, http.StatusInternalServerError, "Error deleting the service:", err.Error()) + return + } + } + + w.Header().Set("Content-Type", "application/json;version="+a.version) + w.WriteHeader(http.StatusOK) +} + +// a.ErrorResponse writes error to HTTP ResponseWriter +func (a *HttpAPI) ErrorResponse(w http.ResponseWriter, code int, msgs ...string) { + msg := strings.Join(msgs, " ") + e := &Error{ + code, + msg, + } + if code >= 500 { + logger.Println("ERROR:", msg) + } + + w.Header().Set("Content-Type", "application/json;version="+a.version) + w.WriteHeader(code) + json.NewEncoder(w).Encode(e) +} diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/init.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/init.go new file mode 100644 index 00000000..a14ed285 --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/init.go @@ -0,0 +1,30 @@ +// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT + +// Package catalog contains the core functionalities of service catalog and +// exposes functions and structs for service representation, processing, and storage +package catalog + +import ( + "log" + "os" + + paho "github.com/eclipse/paho.mqtt.golang" + "github.com/farshidtz/elog" +) + +var logger *elog.Logger + +func init() { + logger = elog.New(LoggerPrefix, &elog.Config{ + DebugPrefix: LoggerPrefix, + DebugTrace: elog.NoTrace, + }) + + if os.Getenv("PAHO_DEBUG") == "1" { + w := elog.NewWriter(os.Stdout) + paho.ERROR = log.New(w, "[paho-error] ", 0) + paho.CRITICAL = log.New(w, "[paho-crit] ", 0) + paho.WARN = log.New(w, "[paho-warn] ", 0) + paho.DEBUG = log.New(w, "[paho-debug] ", 0) + } +} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/ldbstorage.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/ldbstorage.go similarity index 78% rename from vendor/code.linksmart.eu/sc/service-catalog/service/ldbstorage.go rename to vendor/github.com/linksmart/service-catalog/v2/catalog/ldbstorage.go index 9d2fc8c2..6075dc5c 100644 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/ldbstorage.go +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/ldbstorage.go @@ -1,6 +1,6 @@ // Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT -package service +package catalog import ( "encoding/json" @@ -8,7 +8,7 @@ import ( "net/url" "sync" - "code.linksmart.eu/sc/service-catalog/utils" + "github.com/linksmart/service-catalog/v2/utils" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/opt" @@ -20,7 +20,7 @@ type LevelDBStorage struct { wg sync.WaitGroup } -func 
NewLevelDBStorage(dsn string, opts *opt.Options) (CatalogStorage, error) { +func NewLevelDBStorage(dsn string, opts *opt.Options) (Storage, error) { url, err := url.Parse(dsn) if err != nil { return &LevelDBStorage{}, err @@ -43,14 +43,14 @@ func (ls *LevelDBStorage) add(s *Service) error { return err } - _, err = ls.db.Get([]byte(s.Id), nil) + _, err = ls.db.Get([]byte(s.ID), nil) if err == nil { return &ConflictError{"Service id is not unique."} } else if err != leveldb.ErrNotFound { return err } - err = ls.db.Put([]byte(s.Id), bytes, nil) + err = ls.db.Put([]byte(s.ID), bytes, nil) if err != nil { return err } @@ -120,8 +120,12 @@ func (ls *LevelDBStorage) list(page int, perPage int) ([]Service, int, error) { // TODO: is there a better way to do this? // github.com/syndtr/goleveldb/leveldb/iterator services := make([]Service, limit) + ls.wg.Add(1) + defer ls.wg.Done() iter := ls.db.NewIterator(nil, nil) + defer iter.Release() + i := 0 for iter.Next() { var s Service @@ -137,8 +141,7 @@ func (ls *LevelDBStorage) list(page int, perPage int) ([]Service, int, error) { } i++ } - iter.Release() - ls.wg.Done() + err = iter.Error() if err != nil { return nil, 0, err @@ -164,6 +167,36 @@ func (s *LevelDBStorage) total() (int, error) { return c, nil } +func (s *LevelDBStorage) iterator() <-chan *Service { + serviceIter := make(chan *Service) + + go func() { + defer close(serviceIter) + + s.wg.Add(1) + defer s.wg.Done() + iter := s.db.NewIterator(nil, nil) + defer iter.Release() + + for iter.Next() { + var service Service + err := json.Unmarshal(iter.Value(), &service) + if err != nil { + logger.Printf("LevelDB Error: %s", err) + return + } + serviceIter <- &service + } + + err := iter.Error() + if err != nil { + logger.Printf("LevelDB Error: %s", err) + } + }() + + return serviceIter +} + func (s *LevelDBStorage) Close() error { s.wg.Wait() return s.db.Close() diff --git a/vendor/code.linksmart.eu/sc/service-catalog/service/memstorage.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/memstorage.go similarity index 78% rename from vendor/code.linksmart.eu/sc/service-catalog/service/memstorage.go rename to vendor/github.com/linksmart/service-catalog/v2/catalog/memstorage.go index 4bb64e80..a0b1a6ac 100644 --- a/vendor/code.linksmart.eu/sc/service-catalog/service/memstorage.go +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/memstorage.go @@ -1,12 +1,12 @@ // Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT -package service +package catalog import ( "fmt" "sync" - "code.linksmart.eu/sc/service-catalog/utils" + "github.com/linksmart/service-catalog/v2/utils" avl "github.com/ancientlore/go-avltree" ) @@ -31,7 +31,7 @@ func (ms *MemoryStorage) add(s *Service) error { _, duplicate := ms.services.Add(*s) if duplicate { - return &ConflictError{fmt.Sprintf("Service id %s is not unique", s.Id)} + return &ConflictError{fmt.Sprintf("Service id %s is not unique", s.ID)} } return nil @@ -41,7 +41,7 @@ func (ms *MemoryStorage) get(id string) (*Service, error) { ms.RLock() defer ms.RUnlock() - s := ms.services.Find(Service{Id: id}) + s := ms.services.Find(Service{ID: id}) if s == nil { return nil, &NotFoundError{fmt.Sprintf("Service with id %s is not found", id)} } @@ -54,7 +54,7 @@ func (ms *MemoryStorage) update(id string, s *Service) error { ms.Lock() defer ms.Unlock() - r := ms.services.Remove(Service{Id: id}) + r := ms.services.Remove(Service{ID: id}) if r == nil { return &NotFoundError{fmt.Sprintf("Service with id %s is not found", id)} } @@ -68,7 +68,7 
@@ func (ms *MemoryStorage) delete(id string) error { ms.Lock() defer ms.Unlock() - r := ms.services.Remove(Service{Id: id}) + r := ms.services.Remove(Service{ID: id}) if r == nil { return &NotFoundError{fmt.Sprintf("Service with id %s is not found", id)} } @@ -107,15 +107,31 @@ func (ms *MemoryStorage) total() (int, error) { return ms.services.Len(), nil } +func (ms *MemoryStorage) iterator() <-chan *Service { + serviceIter := make(chan *Service) + + go func() { + defer close(serviceIter) + + data := ms.services.Data() + for i := 0; i < len(data); i++ { + service := data[i].(Service) + serviceIter <- &service + } + }() + + return serviceIter +} + func (ms *MemoryStorage) Close() error { return nil } // Comparison operator for AVL Tree func operator(a interface{}, b interface{}) int { - if a.(Service).Id < b.(Service).Id { + if a.(Service).ID < b.(Service).ID { return -1 - } else if a.(Service).Id > b.(Service).Id { + } else if a.(Service).ID > b.(Service).ID { return 1 } return 0 diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt.go new file mode 100644 index 00000000..9fea176e --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt.go @@ -0,0 +1,264 @@ +// Copyright 2016 Fraunhofer Institute for Applied Information Technology FIT + +package catalog + +import ( + "encoding/json" + "log" + "strings" + "time" + + paho "github.com/eclipse/paho.mqtt.golang" + mqtttopic "github.com/farshidtz/mqtt-match" + uuid "github.com/satori/go.uuid" +) + +const ( + mqttMaxRetryInterval = 10 * time.Minute + mqttServiceTTL = 10 * time.Minute + mqttServiceHeartbeatInterval = mqttServiceTTL / 2 + mqttServiceName = "_mqtt._tcp" +) + +type MQTTManager struct { + controller *Controller + scID string + topicPrefix string + + clients []*MQTTClient +} + +type MQTTClient struct { + MQTTClientConf + paho paho.Client + topics []string + willTopics []string + manager *MQTTManager +} + +func StartMQTTManager(controller *Controller, mqttConf MQTTConf, scID string) { + m := &MQTTManager{ + controller: controller, + scID: scID, + topicPrefix: mqttConf.TopicPrefix, + } + controller.AddListener(m) + + for _, clientConf := range append(mqttConf.AdditionalClients, mqttConf.Client) { + if clientConf.BrokerURI == "" { + continue + } + + var client MQTTClient + client.MQTTClientConf = clientConf + client.manager = m + + if client.BrokerID == "" { + client.BrokerID = uuid.NewV4().String() + } + + client.topics = append(mqttConf.CommonRegTopics, client.RegTopics...) + client.willTopics = append(mqttConf.CommonWillTopics, client.WillTopics...) + + logger.Printf("MQTT: Added client for %s", client.BrokerURI) + m.clients = append(m.clients, &client) + go client.connect() + } +} + +func (c *MQTTClient) connect() { + interval := 15 * time.Second + for { + opts, err := c.pahoOptions() + if err != nil { + log.Fatalf("MQTT: Error configuring Paho options: %s", err) + } + // Add handlers + opts.SetOnConnectHandler(c.onConnect) + opts.SetConnectionLostHandler(c.onDisconnect) + + c.paho = paho.NewClient(opts) + if token := c.paho.Connect(); token.Wait() && token.Error() != nil { + log.Printf("Error connecting to broker: %v. 
Retry in %v", token.Error(), interval) + time.Sleep(interval) + if interval *= 2; interval > mqttMaxRetryInterval { + interval = mqttMaxRetryInterval + } + continue + } + + go c.manager.registerAsService(c) + break + } +} + +func (c *MQTTClient) onConnect(pahoClient paho.Client) { + logger.Printf("MQTT: %s: Connected.", c.BrokerURI) + + for _, topic := range append(c.topics, c.willTopics...) { + if token := pahoClient.Subscribe(topic, c.QoS, c.onMessage); token.Wait() && token.Error() != nil { + logger.Printf("MQTT: %s: Error subscribing: %v", c.BrokerURI, token.Error()) + } + logger.Printf("MQTT: %s: Subscribed to %s", c.BrokerURI, topic) + } +} + +func (c *MQTTClient) onDisconnect(pahoClient paho.Client, err error) { + logger.Printf("MQTT: %s: Disconnected: %s", c.BrokerURI, err) +} + +func (c *MQTTClient) onMessage(_ paho.Client, msg paho.Message) { + topic, payload := msg.Topic(), msg.Payload() + logger.Debugf("MQTT: %s %s", topic, payload) + + // Will message has ID in topic + // Get id from topic. Expects: will/ + for _, filter := range c.willTopics { + if mqtttopic.Match(filter, topic) { + if parts := strings.SplitAfter(msg.Topic(), "will/"); len(parts) == 2 && parts[1] != "" { + c.manager.removeService(Service{ID: parts[1]}) + return + } + } + } + + // Get id from topic. Expects: service/ + var id string + if parts := strings.SplitAfter(msg.Topic(), "service/"); len(parts) == 2 { + id = parts[1] + } + + var service Service + err := json.Unmarshal(payload, &service) + if err != nil { + logger.Printf("MQTT: Error parsing json: %s : %v", payload, err) + return + } + + if service.ID == "" && id == "" { + logger.Printf("MQTT: Invalid registration: ID not provided") + return + } else if service.ID == "" { + logger.Debugf("MQTT: Getting id from topic: %s", id) + service.ID = id + } + + c.manager.addService(service) +} + +func (m *MQTTManager) registerAsService(client *MQTTClient) { + service := Service{ + ID: client.BrokerID, + Name: mqttServiceName, + Description: "MQTT Broker", + Meta: map[string]interface{}{ + "registrator": m.scID, + }, + APIs: map[string]string{ + APITypeMQTT: client.BrokerURI, + }, + TTL: uint(mqttServiceTTL / time.Second), + } + // keepalive starting from right now + for ; true; <-time.NewTicker(mqttServiceHeartbeatInterval).C { + m.addService(service) + } +} + +//Controller Listener interface implementation +func (m *MQTTManager) added(s Service) { + if len(m.clients) > 0 { + m.publishAliveService(s) + } +} + +//Controller Listener interface implementation +func (m *MQTTManager) updated(s Service) { + if len(m.clients) > 0 { + m.publishAliveService(s) + } +} + +//Controller Listener interface implementation +func (m *MQTTManager) deleted(s Service) { + if len(m.clients) > 0 { + m.publishDeadService(s) + } +} + +func (m *MQTTManager) publishAliveService(s Service) { + payload, err := json.Marshal(s) + if err != nil { + logger.Printf("MQTT: Error parsing json: %s ", err) + return + } + topic := m.topicPrefix + s.Name + "/" + s.ID + "/alive" + for _, client := range m.clients { + if token := client.paho.Publish(topic, 1, true, payload); token.Wait() && token.Error() != nil { + logger.Printf("MQTT: %s: Error publishing: %v", client.BrokerURI, token.Error()) + } + logger.Printf("MQTT: %s: Published service %s with topic %s", client.BrokerURI, s.ID, topic) + } +} + +func (m *MQTTManager) publishDeadService(s Service) { + // remove the retained message + topic := m.topicPrefix + s.Name + "/" + s.ID + "/alive" + + for _, client := range m.clients { + if token := 
client.paho.Publish(topic, 1, true, ""); token.Wait() && token.Error() != nil { + logger.Printf("MQTT: %s: Error publishing: %v", client.BrokerURI, token.Error()) + } + logger.Printf("MQTT: %s: Removed the retain message topic: %s", client.BrokerURI, topic) + } + + // publish dead message + topic = m.topicPrefix + s.Name + "/" + s.ID + "/dead" + payload, err := json.Marshal(s) + if err != nil { + logger.Printf("MQTT: Error parsing json: %s ", err) + return + } + for _, client := range m.clients { + if token := client.paho.Publish(topic, 1, false, payload); token.Wait() && token.Error() != nil { + logger.Printf("MQTT: %s: Error publishing: %v", client.BrokerURI, token.Error()) + } + logger.Printf("MQTT: %s: Published delete for service %s with topic %s", client.BrokerURI, s.ID, topic) + } +} + +func (m *MQTTManager) removeService(service Service) { + err := m.controller.delete(service.ID) + if err != nil { + logger.Printf("MQTT: Error removing service: %s: %s", service.ID, err) + return + } + logger.Printf("MQTT: Removed service: %s", service.ID) +} + +func (m *MQTTManager) addService(service Service) { + _, err := m.controller.update(service.ID, service) + if err != nil { + switch err.(type) { + case *NotFoundError: + // Create a new service with the given id + _, err := m.controller.add(service) + if err != nil { + switch err.(type) { + case *BadRequestError: + logger.Printf("MQTT: Invalid service: %s", err) + default: + logger.Printf("MQTT: Error adding service: %s", err) + } + } else { + logger.Printf("MQTT: Added service: %s", service.ID) + } + case *BadRequestError: + logger.Printf("MQTT: Invalid service: %s", err) + default: + logger.Printf("MQTT: Error updating service: %s", err) + } + } else { + logger.Printf("MQTT: Updated service: %s", service.ID) + } +} diff --git a/vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt_conf.go b/vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt_conf.go new file mode 100644 index 00000000..d57e9bbc --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/v2/catalog/mqtt_conf.go @@ -0,0 +1,103 @@ +package catalog + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/url" + "strings" + + paho "github.com/eclipse/paho.mqtt.golang" + uuid "github.com/satori/go.uuid" +) + +const ( + mqttClientIDPrefix = "SC-" +) + +type MQTTConf struct { + Client MQTTClientConf `json:"client"` + AdditionalClients []MQTTClientConf `json:"additionalClients"` + CommonRegTopics []string `json:"commonRegTopics"` + CommonWillTopics []string `json:"commonWillTopics"` + TopicPrefix string `json:"topicPrefix"` +} + +type MQTTClientConf struct { + BrokerID string `json:"brokerID"` + BrokerURI string `json:"brokerURI"` + RegTopics []string `json:"regTopics"` + WillTopics []string `json:"willTopics"` + QoS byte `json:"qos"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + CaFile string `json:"caFile,omitempty"` // trusted CA certificates file path + CertFile string `json:"certFile,omitempty"` // client certificate file path + KeyFile string `json:"keyFile,omitempty"` // client private key file path +} + +func (c MQTTConf) Validate() error { + + for _, client := range append(c.AdditionalClients, c.Client) { + if client.BrokerURI == "" { + continue + } + _, err := url.Parse(client.BrokerURI) + if err != nil { + return err + } + if client.QoS > 2 { + return fmt.Errorf("QoS must be 0, 1, or 2") + } + if len(c.CommonRegTopics) == 0 && len(client.RegTopics) == 0 { + return fmt.Errorf("regTopics not 
defined") + } + } + return nil +} + +func (client MQTTClientConf) pahoOptions() (*paho.ClientOptions, error) { + opts := paho.NewClientOptions() // uses defaults: https://godoc.org/github.com/eclipse/paho.mqtt.golang#NewClientOptions + opts.AddBroker(client.BrokerURI) + opts.SetClientID(fmt.Sprintf("%s%s", mqttClientIDPrefix, uuid.NewV4().String())) + + if client.Username != "" { + opts.SetUsername(client.Username) + } + if client.Password != "" { + opts.SetPassword(client.Password) + } + + // TLS CONFIG + tlsConfig := &tls.Config{} + if client.CaFile != "" { + if !strings.HasPrefix(client.BrokerURI, "ssl") { + logger.Printf("MQTT: Warning: Configuring TLS with a non-SSL protocol: %s", client.BrokerURI) + } + // Import trusted certificates from CAfile.pem. + // Alternatively, manually add CA certificates to + // default openssl CA bundle. + tlsConfig.RootCAs = x509.NewCertPool() + pemCerts, err := ioutil.ReadFile(client.CaFile) + if err == nil { + tlsConfig.RootCAs.AppendCertsFromPEM(pemCerts) + } + } + if client.CertFile != "" && client.KeyFile != "" { + // Import client certificate/key pair + cert, err := tls.LoadX509KeyPair(client.CertFile, client.KeyFile) + if err != nil { + return nil, fmt.Errorf("error loading client keypair: %s", err) + } + // Just to print out the client certificate.. + cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return nil, fmt.Errorf("error parsing client certificate: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + opts.SetTLSConfig(tlsConfig) + + return opts, nil +} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/utils/http.go b/vendor/github.com/linksmart/service-catalog/v2/utils/http.go similarity index 97% rename from vendor/code.linksmart.eu/sc/service-catalog/utils/http.go rename to vendor/github.com/linksmart/service-catalog/v2/utils/http.go index b89c5b40..4c7b24e9 100644 --- a/vendor/code.linksmart.eu/sc/service-catalog/utils/http.go +++ b/vendor/github.com/linksmart/service-catalog/v2/utils/http.go @@ -8,7 +8,7 @@ import ( "net/http" "strings" - "code.linksmart.eu/com/go-sec/auth/obtainer" + "github.com/linksmart/go-sec/auth/obtainer" ) // Constructs and submits an HTTP request and returns the response diff --git a/vendor/github.com/linksmart/service-catalog/v2/utils/init.go b/vendor/github.com/linksmart/service-catalog/v2/utils/init.go new file mode 100644 index 00000000..d0354a54 --- /dev/null +++ b/vendor/github.com/linksmart/service-catalog/v2/utils/init.go @@ -0,0 +1,17 @@ +// Copyright 2014-2016 Fraunhofer Institute for Applied Information Technology FIT + +// Package utils offers utility functions for http requests, pagination, and filtering +package utils + +import ( + "github.com/farshidtz/elog" +) + +var logger *elog.Logger + +func init() { + logger = elog.New("[utils] ", &elog.Config{ + DebugPrefix: "[utils-debug] ", + DebugTrace: elog.NoTrace, + }) +} diff --git a/vendor/code.linksmart.eu/sc/service-catalog/utils/pagination.go b/vendor/github.com/linksmart/service-catalog/v2/utils/pagination.go similarity index 100% rename from vendor/code.linksmart.eu/sc/service-catalog/utils/pagination.go rename to vendor/github.com/linksmart/service-catalog/v2/utils/pagination.go diff --git a/vendor/code.linksmart.eu/sc/service-catalog/utils/pathfilter.go b/vendor/github.com/linksmart/service-catalog/v2/utils/pathfilter.go similarity index 100% rename from vendor/code.linksmart.eu/sc/service-catalog/utils/pathfilter.go rename to 
vendor/github.com/linksmart/service-catalog/v2/utils/pathfilter.go diff --git a/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/miekg/dns/client_test.go deleted file mode 100644 index 73083dba..00000000 --- a/vendor/github.com/miekg/dns/client_test.go +++ /dev/null @@ -1,532 +0,0 @@ -package dns - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "strconv" - "sync" - "testing" - "time" -) - -func TestClientSync(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } - // And now with plain Exchange(). - r, err = Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r == nil || r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} - -func TestClientTLSSync(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) - if err != nil { - t.Fatalf("unable to build certificate: %v", err) - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - c.Net = "tcp-tls" - c.TLSConfig = &tls.Config{ - InsecureSkipVerify: true, - } - - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} - -func TestClientSyncBadID(t *testing.T) { - HandleFunc("miek.nl.", HelloServerBadID) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - if _, _, err := c.Exchange(m, addrstr); err != ErrId { - t.Errorf("did not find a bad Id") - } - // And now with plain Exchange(). - if _, err := Exchange(m, addrstr); err != ErrId { - t.Errorf("did not find a bad Id") - } -} - -func TestClientEDNS0(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeDNSKEY) - - m.SetEdns0(2048, true) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} - -// Validates the transmission and parsing of local EDNS0 options. 
-func TestClientEDNS0Local(t *testing.T) { - optStr1 := "1979:0x0707" - optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601" - - handler := func(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1, 2) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}} - - // If the local options are what we expect, then reflect them back. - ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String() - ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String() - if ec1 == optStr1 && ec2 == optStr2 { - m.Extra = append(m.Extra, req.Extra[0]) - } - - w.WriteMsg(m) - } - - HandleFunc("miek.nl.", handler) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %s", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - - // Add two local edns options to the query. - ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}} - ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}} - o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}} - m.Extra = append(m.Extra, o) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %s", err) - } - - if r != nil && r.Rcode != RcodeSuccess { - t.Error("failed to get a valid answer") - t.Logf("%v\n", r) - } - - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello local edns" { - t.Error("Unexpected result for miek.nl", txt, "!= Hello local edns") - } - - // Validate the local options in the reply. - got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String() - if got != optStr1 { - t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr1) - t.Logf("%v\n", r) - } - - got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String() - if got != optStr2 { - t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr2) - t.Logf("%v\n", r) - } -} - -// ExampleTsigSecret_updateLeaseTSIG shows how to update a lease signed with TSIG -func ExampleTsigSecret_updateLeaseTSIG() { - m := new(Msg) - m.SetUpdate("t.local.ip6.io.") - rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1") - rrs := make([]RR, 1) - rrs[0] = rr - m.Insert(rrs) - - leaseRr := new(OPT) - leaseRr.Hdr.Name = "." 
- leaseRr.Hdr.Rrtype = TypeOPT - e := new(EDNS0_UL) - e.Code = EDNS0UL - e.Lease = 120 - leaseRr.Option = append(leaseRr.Option, e) - m.Extra = append(m.Extra, leaseRr) - - c := new(Client) - m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix()) - c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="} - - _, _, err := c.Exchange(m, "127.0.0.1:53") - if err != nil { - panic(err) - } -} - -func TestClientConn(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - // This uses TCP just to make it slightly different than TestClientSync - s, addrstr, err := RunLocalTCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - cn, err := Dial("tcp", addrstr) - if err != nil { - t.Errorf("failed to dial %s: %v", addrstr, err) - } - - err = cn.WriteMsg(m) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - r, err := cn.ReadMsg() - if err != nil { - t.Errorf("failed to get a valid answer: %v", err) - } - if r == nil || r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } - - err = cn.WriteMsg(m) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - h := new(Header) - buf, err := cn.ReadMsgHeader(h) - if buf == nil { - t.Errorf("failed to get an valid answer\n%v", r) - } - if err != nil { - t.Errorf("failed to get a valid answer: %v", err) - } - if int(h.Bits&0xF) != RcodeSuccess { - t.Errorf("failed to get an valid answer in ReadMsgHeader\n%v", r) - } - if h.Ancount != 0 || h.Qdcount != 1 || h.Nscount != 0 || h.Arcount != 1 { - t.Errorf("expected to have question and additional in response; got something else: %+v", h) - } - if err = r.Unpack(buf); err != nil { - t.Errorf("unable to unpack message fully: %v", err) - } -} - -func TestTruncatedMsg(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSRV) - cnt := 10 - for i := 0; i < cnt; i++ { - r := &SRV{ - Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeSRV, Class: ClassINET, Ttl: 0}, - Port: uint16(i + 8000), - Target: "target.miek.nl.", - } - m.Answer = append(m.Answer, r) - - re := &A{ - Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeA, Class: ClassINET, Ttl: 0}, - A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i)).To4(), - } - m.Extra = append(m.Extra, re) - } - buf, err := m.Pack() - if err != nil { - t.Errorf("failed to pack: %v", err) - } - - r := new(Msg) - if err = r.Unpack(buf); err != nil { - t.Errorf("unable to unpack message: %v", err) - } - if len(r.Answer) != cnt { - t.Errorf("answer count after regular unpack doesn't match: %d", len(r.Answer)) - } - if len(r.Extra) != cnt { - t.Errorf("extra count after regular unpack doesn't match: %d", len(r.Extra)) - } - - m.Truncated = true - buf, err = m.Pack() - if err != nil { - t.Errorf("failed to pack truncated: %v", err) - } - - r = new(Msg) - if err = r.Unpack(buf); err != nil && err != ErrTruncated { - t.Errorf("unable to unpack truncated message: %v", err) - } - if !r.Truncated { - t.Errorf("truncated message wasn't unpacked as truncated") - } - if len(r.Answer) != cnt { - t.Errorf("answer count after truncated unpack doesn't match: %d", len(r.Answer)) - } - if len(r.Extra) != cnt { - t.Errorf("extra count after truncated unpack doesn't match: %d", len(r.Extra)) - } - - // Now we want to remove almost all of the extra records - // We're going to loop over the extra to get the count of the size of all - // of them - off := 0 - buf1 := make([]byte, m.Len()) 
- for i := 0; i < len(m.Extra); i++ { - off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) - if err != nil { - t.Errorf("failed to pack extra: %v", err) - } - } - - // Remove all of the extra bytes but 10 bytes from the end of buf - off -= 10 - buf1 = buf[:len(buf)-off] - - r = new(Msg) - if err = r.Unpack(buf1); err != nil && err != ErrTruncated { - t.Errorf("unable to unpack cutoff message: %v", err) - } - if !r.Truncated { - t.Error("truncated cutoff message wasn't unpacked as truncated") - } - if len(r.Answer) != cnt { - t.Errorf("answer count after cutoff unpack doesn't match: %d", len(r.Answer)) - } - if len(r.Extra) != 0 { - t.Errorf("extra count after cutoff unpack is not zero: %d", len(r.Extra)) - } - - // Now we want to remove almost all of the answer records too - buf1 = make([]byte, m.Len()) - as := 0 - for i := 0; i < len(m.Extra); i++ { - off1 := off - off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) - as = off - off1 - if err != nil { - t.Errorf("failed to pack extra: %v", err) - } - } - - // Keep exactly one answer left - // This should still cause Answer to be nil - off -= as - buf1 = buf[:len(buf)-off] - - r = new(Msg) - if err = r.Unpack(buf1); err != nil && err != ErrTruncated { - t.Errorf("unable to unpack cutoff message: %v", err) - } - if !r.Truncated { - t.Error("truncated cutoff message wasn't unpacked as truncated") - } - if len(r.Answer) != 0 { - t.Errorf("answer count after second cutoff unpack is not zero: %d", len(r.Answer)) - } - - // Now leave only 1 byte of the question - // Since the header is always 12 bytes, we just need to keep 13 - buf1 = buf[:13] - - r = new(Msg) - err = r.Unpack(buf1) - if err == nil || err == ErrTruncated { - t.Errorf("error should not be ErrTruncated from question cutoff unpack: %v", err) - } - - // Finally, if we only have the header, we should still return an error - buf1 = buf[:12] - - r = new(Msg) - if err = r.Unpack(buf1); err == nil || err != ErrTruncated { - t.Errorf("error not ErrTruncated from header-only unpack: %v", err) - } -} - -func TestTimeout(t *testing.T) { - // Set up a dummy UDP server that won't respond - addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") - if err != nil { - t.Fatalf("unable to resolve local udp address: %v", err) - } - conn, err := net.ListenUDP("udp", addr) - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer conn.Close() - addrstr := conn.LocalAddr().String() - - // Message to send - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - - // Use a channel + timeout to ensure we don't get stuck if the - // Client Timeout is not working properly - done := make(chan struct{}, 2) - - timeout := time.Millisecond - allowable := timeout + (10 * time.Millisecond) - abortAfter := timeout + (100 * time.Millisecond) - - start := time.Now() - - go func() { - c := &Client{Timeout: timeout} - _, _, err := c.Exchange(m, addrstr) - if err == nil { - t.Error("no timeout using Client.Exchange") - } - done <- struct{}{} - }() - - go func() { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - c := &Client{} - _, _, err := c.ExchangeContext(ctx, m, addrstr) - if err == nil { - t.Error("no timeout using Client.ExchangeContext") - } - done <- struct{}{} - }() - - // Wait for both the Exchange and ExchangeContext tests to be done. 
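// --- Illustrative usage sketch (not part of the vendored tests) ----------
// The pruned TestTimeout above exercises Client.Exchange, Client.ExchangeContext
// and the Client.Timeout field. A minimal, self-contained sketch of that API;
// the query name and the resolver address 127.0.0.1:53 are assumptions for
// illustration only.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)
	m.SetEdns0(4096, false) // advertise a 4096-byte UDP buffer, DO bit unset

	// Per-client timeout, as in the Client{Timeout: ...} branch of the test.
	c := &dns.Client{Timeout: 2 * time.Second}
	r, rtt, err := c.Exchange(m, "127.0.0.1:53")
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Println("rcode:", r.Rcode, "rtt:", rtt)

	// The same query bounded by a context, as in the ExchangeContext branch.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if r, _, err = c.ExchangeContext(ctx, m, "127.0.0.1:53"); err == nil {
		fmt.Println("answers:", len(r.Answer))
	}
}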
- for i := 0; i < 2; i++ { - select { - case <-done: - case <-time.After(abortAfter): - } - } - - length := time.Since(start) - - if length > allowable { - t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout) - } -} - -// Check that responses from deduplicated requests aren't shared between callers -func TestConcurrentExchanges(t *testing.T) { - cases := make([]*Msg, 2) - cases[0] = new(Msg) - cases[1] = new(Msg) - cases[1].Truncated = true - for _, m := range cases { - block := make(chan struct{}) - waiting := make(chan struct{}) - - handler := func(w ResponseWriter, req *Msg) { - r := m.Copy() - r.SetReply(req) - - waiting <- struct{}{} - <-block - w.WriteMsg(r) - } - - HandleFunc("miek.nl.", handler) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %s", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSRV) - c := &Client{ - SingleInflight: true, - } - r := make([]*Msg, 2) - - var wg sync.WaitGroup - wg.Add(len(r)) - for i := 0; i < len(r); i++ { - go func(i int) { - r[i], _, _ = c.Exchange(m.Copy(), addrstr) - wg.Done() - }(i) - } - select { - case <-waiting: - case <-time.After(time.Second): - t.FailNow() - } - close(block) - wg.Wait() - - if r[0] == r[1] { - t.Log("Got same response object, expected non-shared responses") - t.Fail() - } - } -} diff --git a/vendor/github.com/miekg/dns/clientconfig_test.go b/vendor/github.com/miekg/dns/clientconfig_test.go deleted file mode 100644 index 7755a8a6..00000000 --- a/vendor/github.com/miekg/dns/clientconfig_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package dns - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -const normal string = ` -# Comment -domain somedomain.com -nameserver 10.28.10.2 -nameserver 11.28.10.1 -` - -const missingNewline string = ` -domain somedomain.com -nameserver 10.28.10.2 -nameserver 11.28.10.1` // <- NOTE: NO newline. - -func testConfig(t *testing.T, data string) { - tempDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("tempDir: %v", err) - } - defer os.RemoveAll(tempDir) - - path := filepath.Join(tempDir, "resolv.conf") - if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil { - t.Fatalf("writeFile: %v", err) - } - cc, err := ClientConfigFromFile(path) - if err != nil { - t.Errorf("error parsing resolv.conf: %v", err) - } - if l := len(cc.Servers); l != 2 { - t.Errorf("incorrect number of nameservers detected: %d", l) - } - if l := len(cc.Search); l != 1 { - t.Errorf("domain directive not parsed correctly: %v", cc.Search) - } else { - if cc.Search[0] != "somedomain.com" { - t.Errorf("domain is unexpected: %v", cc.Search[0]) - } - } -} - -func TestNameserver(t *testing.T) { testConfig(t, normal) } -func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) } - -func TestNameList(t *testing.T) { - cfg := ClientConfig{ - Ndots: 1, - } - // fqdn should be only result returned - names := cfg.NameList("miek.nl.") - if len(names) != 1 { - t.Errorf("NameList returned != 1 names: %v", names) - } else if names[0] != "miek.nl." { - t.Errorf("NameList didn't return sent fqdn domain: %v", names[0]) - } - - cfg.Search = []string{ - "test", - } - // Sent domain has NDots and search - names = cfg.NameList("miek.nl") - if len(names) != 2 { - t.Errorf("NameList returned != 2 names: %v", names) - } else if names[0] != "miek.nl." 
{ - t.Errorf("NameList didn't return sent domain first: %v", names[0]) - } else if names[1] != "miek.nl.test." { - t.Errorf("NameList didn't return search last: %v", names[1]) - } - - cfg.Ndots = 2 - // Sent domain has less than NDots and search - names = cfg.NameList("miek.nl") - if len(names) != 2 { - t.Errorf("NameList returned != 2 names: %v", names) - } else if names[0] != "miek.nl.test." { - t.Errorf("NameList didn't return search first: %v", names[0]) - } else if names[1] != "miek.nl." { - t.Errorf("NameList didn't return sent domain last: %v", names[1]) - } -} diff --git a/vendor/github.com/miekg/dns/compress_generate.go b/vendor/github.com/miekg/dns/compress_generate.go deleted file mode 100644 index 1a301e9f..00000000 --- a/vendor/github.com/miekg/dns/compress_generate.go +++ /dev/null @@ -1,184 +0,0 @@ -//+build ignore - -// compression_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will look to see if there are (compressible) names, if so it will add that -// type to compressionLenHelperType and comressionLenSearchType which "fake" the -// compression so that Len() is fast. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" -) - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from compress_generate.go - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - domainTypes := map[string]bool{} // Types that have a domain name in them (either comressible or not). 
- cdomainTypes := map[string]bool{} // Types that have a compressible domain name in them (subset of domainType) - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - st, _ := getTypeStruct(o.Type(), scope) - if st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - for i := 1; i < st.NumFields(); i++ { - if _, ok := st.Field(i).Type().(*types.Slice); ok { - if st.Tag(i) == `dns:"domain-name"` { - domainTypes[o.Name()] = true - } - if st.Tag(i) == `dns:"cdomain-name"` { - cdomainTypes[o.Name()] = true - domainTypes[o.Name()] = true - } - continue - } - - switch { - case st.Tag(i) == `dns:"domain-name"`: - domainTypes[o.Name()] = true - case st.Tag(i) == `dns:"cdomain-name"`: - cdomainTypes[o.Name()] = true - domainTypes[o.Name()] = true - } - } - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names - - fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n") - fmt.Fprint(b, "switch x := r.(type) {\n") - for name, _ := range domainTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "case *%s:\n", name) - for i := 1; i < st.NumFields(); i++ { - out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"domain-name"`: - fallthrough - case `dns:"cdomain-name"`: - // For HIP we need to slice over the elements in this slice. - fmt.Fprintf(b, `for i := range x.%s { - compressionLenHelper(c, x.%s[i]) - } -`, st.Field(i).Name(), st.Field(i).Name()) - } - continue - } - - switch { - case st.Tag(i) == `dns:"cdomain-name"`: - fallthrough - case st.Tag(i) == `dns:"domain-name"`: - out(st.Field(i).Name()) - } - } - } - fmt.Fprintln(b, "}\n}\n\n") - - // compressionLenSearchType - search cdomain-tags types for compressible names. - - fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n") - fmt.Fprint(b, "switch x := r.(type) {\n") - for name, _ := range cdomainTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "case *%s:\n", name) - j := 1 - for i := 1; i < st.NumFields(); i++ { - out := func(s string, j int) { - fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name()) - } - - // There are no slice types with names that can be compressed. 
- - switch { - case st.Tag(i) == `dns:"cdomain-name"`: - out(st.Field(i).Name(), j) - j++ - } - } - k := "k1" - ok := "ok1" - for i := 2; i < j; i++ { - k += fmt.Sprintf(" + k%d", i) - ok += fmt.Sprintf(" && ok%d", i) - } - fmt.Fprintf(b, "return %s, %s\n", k, ok) - } - fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - f, err := os.Create("zcompress.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/dns_bench_test.go b/vendor/github.com/miekg/dns/dns_bench_test.go deleted file mode 100644 index bccc3d54..00000000 --- a/vendor/github.com/miekg/dns/dns_bench_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package dns - -import ( - "net" - "testing" -) - -func BenchmarkMsgLength(b *testing.B) { - b.StopTimer() - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - b.StartTimer() - for i := 0; i < b.N; i++ { - msg.Len() - } -} - -func BenchmarkMsgLengthPack(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = msg.Pack() - } -} - -func BenchmarkPackDomainName(b *testing.B) { - name1 := "12345678901234567890123456789012345.12345678.123." - buf := make([]byte, len(name1)+1) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackDomainName(name1, buf, 0, nil, false) - } -} - -func BenchmarkUnpackDomainName(b *testing.B) { - name1 := "12345678901234567890123456789012345.12345678.123." - buf := make([]byte, len(name1)+1) - _, _ = PackDomainName(name1, buf, 0, nil, false) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackDomainName(buf, 0) - } -} - -func BenchmarkUnpackDomainNameUnprintable(b *testing.B) { - name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123." - buf := make([]byte, len(name1)+1) - _, _ = PackDomainName(name1, buf, 0, nil, false) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackDomainName(buf, 0) - } -} - -func BenchmarkCopy(b *testing.B) { - b.ReportAllocs() - m := new(Msg) - m.SetQuestion("miek.nl.", TypeA) - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") - m.Answer = []RR{rr} - rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") - m.Ns = []RR{rr} - rr, _ = NewRR("miek.nl. 
2311 IN A 127.0.0.1") - m.Extra = []RR{rr} - - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Copy() - } -} - -func BenchmarkPackA(b *testing.B) { - a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} - - buf := make([]byte, a.len()) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackRR(a, buf, 0, nil, false) - } -} - -func BenchmarkUnpackA(b *testing.B) { - a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} - - buf := make([]byte, a.len()) - PackRR(a, buf, 0, nil, false) - a = nil - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackRR(buf, 0) - } -} - -func BenchmarkPackMX(b *testing.B) { - m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} - - buf := make([]byte, m.len()) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackRR(m, buf, 0, nil, false) - } -} - -func BenchmarkUnpackMX(b *testing.B) { - m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} - - buf := make([]byte, m.len()) - PackRR(m, buf, 0, nil, false) - m = nil - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackRR(buf, 0) - } -} - -func BenchmarkPackAAAAA(b *testing.B) { - aaaa, _ := NewRR(". IN A ::1") - - buf := make([]byte, aaaa.len()) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = PackRR(aaaa, buf, 0, nil, false) - } -} - -func BenchmarkUnpackAAAA(b *testing.B) { - aaaa, _ := NewRR(". IN A ::1") - - buf := make([]byte, aaaa.len()) - PackRR(aaaa, buf, 0, nil, false) - aaaa = nil - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = UnpackRR(buf, 0) - } -} - -func BenchmarkPackMsg(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - buf := make([]byte, 512) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = msg.PackBuffer(buf) - } -} - -func BenchmarkUnpackMsg(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." 
- rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - msgBuf, _ := msg.Pack() - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = msg.Unpack(msgBuf) - } -} - -func BenchmarkIdGeneration(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = id() - } -} diff --git a/vendor/github.com/miekg/dns/dns_test.go b/vendor/github.com/miekg/dns/dns_test.go deleted file mode 100644 index 5568c316..00000000 --- a/vendor/github.com/miekg/dns/dns_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package dns - -import ( - "encoding/hex" - "net" - "testing" -) - -func TestPackUnpack(t *testing.T) { - out := new(Msg) - out.Answer = make([]RR, 1) - key := new(DNSKEY) - key = &DNSKEY{Flags: 257, Protocol: 3, Algorithm: RSASHA1} - key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600} - key.PublicKey = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" - - out.Answer[0] = key - msg, err := out.Pack() - if err != nil { - t.Error("failed to pack msg with DNSKEY") - } - in := new(Msg) - if in.Unpack(msg) != nil { - t.Error("failed to unpack msg with DNSKEY") - } - - sig := new(RRSIG) - sig = &RRSIG{TypeCovered: TypeDNSKEY, Algorithm: RSASHA1, Labels: 2, - OrigTtl: 3600, Expiration: 4000, Inception: 4000, KeyTag: 34641, SignerName: "miek.nl.", - Signature: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"} - sig.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeRRSIG, Class: ClassINET, Ttl: 3600} - - out.Answer[0] = sig - msg, err = out.Pack() - if err != nil { - t.Error("failed to pack msg with RRSIG") - } - - if in.Unpack(msg) != nil { - t.Error("failed to unpack msg with RRSIG") - } -} - -func TestPackUnpack2(t *testing.T) { - m := new(Msg) - m.Extra = make([]RR, 1) - m.Answer = make([]RR, 1) - dom := "miek.nl." - rr := new(A) - rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} - rr.A = net.IPv4(127, 0, 0, 1) - - x := new(TXT) - x.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x.Txt = []string{"heelalaollo"} - - m.Extra[0] = x - m.Answer[0] = rr - _, err := m.Pack() - if err != nil { - t.Error("Packing failed: ", err) - return - } -} - -func TestPackUnpack3(t *testing.T) { - m := new(Msg) - m.Extra = make([]RR, 2) - m.Answer = make([]RR, 1) - dom := "miek.nl." 
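// --- Illustrative sketch (not part of the vendored tests) ----------------
// The tests in this file build resource records in two ways: filling a typed
// struct together with an RR_Header (as TestPackUnpack2 does just below), or
// parsing zone-file syntax with dns.NewRR. A condensed sketch of both; the
// TXT content is illustrative.
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/miekg/dns"
)

func main() {
	// 1) Typed struct plus RR_Header.
	a := &dns.A{
		Hdr: dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0},
		A:   net.IPv4(127, 0, 0, 1),
	}

	// 2) Zone-file syntax parsed by NewRR.
	txt, err := dns.NewRR(`miek.nl. 0 IN TXT "heelalaollo"`)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(a.String())
	fmt.Println(txt.String())
}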
- rr := new(A) - rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} - rr.A = net.IPv4(127, 0, 0, 1) - - x1 := new(TXT) - x1.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x1.Txt = []string{} - - x2 := new(TXT) - x2.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x2.Txt = []string{"heelalaollo"} - - m.Extra[0] = x1 - m.Extra[1] = x2 - m.Answer[0] = rr - b, err := m.Pack() - if err != nil { - t.Error("packing failed: ", err) - return - } - - var unpackMsg Msg - err = unpackMsg.Unpack(b) - if err != nil { - t.Error("unpacking failed") - return - } -} - -func TestBailiwick(t *testing.T) { - yes := map[string]string{ - "miek1.nl": "miek1.nl", - "miek.nl": "ns.miek.nl", - ".": "miek.nl", - } - for parent, child := range yes { - if !IsSubDomain(parent, child) { - t.Errorf("%s should be child of %s", child, parent) - t.Errorf("comparelabels %d", CompareDomainName(parent, child)) - t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) - } - } - no := map[string]string{ - "www.miek.nl": "ns.miek.nl", - "m\\.iek.nl": "ns.miek.nl", - "w\\.iek.nl": "w.iek.nl", - "p\\\\.iek.nl": "ns.p.iek.nl", // p\\.iek.nl , literal \ in domain name - "miek.nl": ".", - } - for parent, child := range no { - if IsSubDomain(parent, child) { - t.Errorf("%s should not be child of %s", child, parent) - t.Errorf("comparelabels %d", CompareDomainName(parent, child)) - t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) - } - } -} - -func TestPack(t *testing.T) { - rr := []string{"US. 86400 IN NSEC 0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"} - m := new(Msg) - var err error - m.Answer = make([]RR, 1) - for _, r := range rr { - m.Answer[0], err = NewRR(r) - if err != nil { - t.Errorf("failed to create RR: %v", err) - continue - } - if _, err := m.Pack(); err != nil { - t.Errorf("packing failed: %v", err) - } - } - x := new(Msg) - ns, _ := NewRR("pool.ntp.org. 390 IN NS a.ntpns.org") - ns.(*NS).Ns = "a.ntpns.org" - x.Ns = append(m.Ns, ns) - x.Ns = append(m.Ns, ns) - x.Ns = append(m.Ns, ns) - // This crashes due to the fact the a.ntpns.org isn't a FQDN - // How to recover() from a remove panic()? - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } - x.Answer = make([]RR, 1) - x.Answer[0], err = NewRR(rr[0]) - if err != nil { - t.Fatal(err) - } - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } - x.Question = make([]Question, 1) - x.Question[0] = Question{";sd#eddddsé›↙èµÂ‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET} - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } -} - -func TestPackNAPTR(t *testing.T) { - for _, n := range []string{ - `apple.com. IN NAPTR 100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`, - `apple.com. IN NAPTR 90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`, - `apple.com. IN NAPTR 50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`, - } { - rr, _ := NewRR(n) - msg := make([]byte, rr.len()) - if off, err := PackRR(rr, msg, 0, nil, false); err != nil { - t.Errorf("packing failed: %v", err) - t.Errorf("length %d, need more than %d", rr.len(), off) - } else { - t.Logf("buf size needed: %d", off) - } - } -} - -func TestCompressLength(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl", TypeMX) - ul := m.Len() - m.Compress = true - if ul != m.Len() { - t.Fatalf("should be equal") - } -} - -// Does the predicted length match final packed length? 
-func TestMsgCompressLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." - rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", - msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } - } -} - -func TestMsgLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." - rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d", - msg.Question[0].Name, predicted, len(buf)) - } - } -} - -func TestMsgLength2(t *testing.T) { - // Serialized replies - var testMessages = []string{ - // google.com. IN A? - "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000", - // amazon.com. IN A? 
(reply has no EDNS0 record) - // TODO(miek): this one is off-by-one, need to find out why - //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001", - // yahoo.com. IN A? - "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000", - // microsoft.com. IN A? - "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000", - // google.com. IN MX? - "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000", - // reddit.com. IN A? 
- "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000", - } - - for i, hexData := range testMessages { - // we won't fail the decoding of the hex - input, _ := hex.DecodeString(hexData) - - m := new(Msg) - m.Unpack(input) - m.Compress = true - lenComp := m.Len() - b, _ := m.Pack() - pacComp := len(b) - m.Compress = false - lenUnComp := m.Len() - b, _ = m.Pack() - pacUnComp := len(b) - if pacComp+1 != lenComp { - t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i) - } - if pacUnComp+1 != lenUnComp { - t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i) - } - } -} - -func TestMsgLengthCompressionMalformed(t *testing.T) { - // SOA with empty hostmaster, which is illegal - soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345}, - Ns: ".", - Mbox: "", - Serial: 0, - Refresh: 28800, - Retry: 7200, - Expire: 604800, - Minttl: 60} - m := new(Msg) - m.Compress = true - m.Ns = []RR{soa} - m.Len() // Should not crash. -} - -func TestMsgCompressLength2(t *testing.T) { - msg := new(Msg) - msg.Compress = true - msg.SetQuestion(Fqdn("bliep."), TypeANY) - msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "blaat.", Rrtype: 0x21, Class: 0x1, Ttl: 0x3c}, Port: 0x4c57, Target: "foo.bar."}) - msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: "foo.bar.", Rrtype: 0x1, Class: 0x1, Ttl: 0x3c}, A: net.IP{0xac, 0x11, 0x0, 0x3}}) - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted != len(buf) { - t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", - msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } -} - -func TestToRFC3597(t *testing.T) { - a, _ := NewRR("miek.nl. IN A 10.0.1.1") - x := new(RFC3597) - x.ToRFC3597(a) - if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` { - t.Errorf("string mismatch, got: %s", x) - } - - b, _ := NewRR("miek.nl. IN MX 10 mx.miek.nl.") - x.ToRFC3597(b) - if x.String() != `miek.nl. 3600 CLASS1 TYPE15 \# 14 000a026d78046d69656b026e6c00` { - t.Errorf("string mismatch, got: %s", x) - } -} - -func TestNoRdataPack(t *testing.T) { - data := make([]byte, 1024) - for typ, fn := range TypeToRR { - r := fn() - *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} - _, err := PackRR(r, data, 0, nil, false) - if err != nil { - t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err) - } - } -} - -func TestNoRdataUnpack(t *testing.T) { - data := make([]byte, 1024) - for typ, fn := range TypeToRR { - if typ == TypeSOA || typ == TypeTSIG { - // SOA, TSIG will not be seen (like this) in dyn. updates? 
- continue - } - r := fn() - *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} - off, err := PackRR(r, data, 0, nil, false) - if err != nil { - // Should always works, TestNoDataPack should have caught this - t.Errorf("failed to pack RR: %v", err) - continue - } - rr, _, err := UnpackRR(data[:off], 0) - if err != nil { - t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err) - } - t.Log(rr) - } -} - -func TestRdataOverflow(t *testing.T) { - rr := new(RFC3597) - rr.Hdr.Name = "." - rr.Hdr.Class = ClassINET - rr.Hdr.Rrtype = 65280 - rr.Rdata = hex.EncodeToString(make([]byte, 0xFFFF)) - buf := make([]byte, 0xFFFF*2) - if _, err := PackRR(rr, buf, 0, nil, false); err != nil { - t.Fatalf("maximum size rrdata pack failed: %v", err) - } - rr.Rdata += "00" - if _, err := PackRR(rr, buf, 0, nil, false); err != ErrRdata { - t.Fatalf("oversize rrdata pack didn't return ErrRdata - instead: %v", err) - } -} - -func TestCopy(t *testing.T) { - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL - rr1 := Copy(rr) - if rr.String() != rr1.String() { - t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String()) - } -} - -func TestMsgCopy(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl.", TypeA) - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") - m.Answer = []RR{rr} - rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") - m.Ns = []RR{rr} - - m1 := m.Copy() - if m.String() != m1.String() { - t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String()) - } - - m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1") - if m.String() == m1.String() { - t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String()) - } - - rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2") - m1.Answer = append(m1.Answer, rr) - if m1.Ns[0].String() == m1.Answer[1].String() { - t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String()) - } -} - -func TestMsgPackBuffer(t *testing.T) { - var testMessages = []string{ - // news.ycombinator.com.in.escapemg.com. IN A, response - "586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10", - - // news.ycombinator.com.in.escapemg.com. IN A, question - "586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001", - - "398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001", - } - - for i, hexData := range testMessages { - // we won't fail the decoding of the hex - input, _ := hex.DecodeString(hexData) - m := new(Msg) - if err := m.Unpack(input); err != nil { - t.Errorf("packet %d failed to unpack", i) - continue - } - t.Logf("packet %d %s", i, m.String()) - } -} diff --git a/vendor/github.com/miekg/dns/dnssec_test.go b/vendor/github.com/miekg/dns/dnssec_test.go deleted file mode 100644 index ca085ed3..00000000 --- a/vendor/github.com/miekg/dns/dnssec_test.go +++ /dev/null @@ -1,733 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "reflect" - "strings" - "testing" - "time" -) - -func getKey() *DNSKEY { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." 
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - return key -} - -func getSoa() *SOA { - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - return soa -} - -func TestGenerateEC(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = ECDSAP256SHA256 - privkey, _ := key.Generate(256) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestGenerateDSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = DSA - privkey, _ := key.Generate(1024) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestGenerateRSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(1024) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestSecure(t *testing.T) { - soa := getSoa() - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = TypeSOA - sig.Algorithm = RSASHA256 - sig.Labels = 2 - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.OrigTtl = 14400 - sig.KeyTag = 12051 - sig.SignerName = "miek.nl." - sig.Signature = "oMCbslaAVIp/8kVtLSms3tDABpcPRUgHLrOR48OOplkYo+8TeEGWwkSwaz/MRo2fB4FxW0qj/hTlIjUGuACSd+b1wKdH5GvzRJc2pFmxtCbm55ygAh4EUL0F6U5cKtGJGSXxxg6UFCQ0doJCmiGFa78LolaUOXImJrk6AFrGa0M=" - - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - // It should validate. Period is checked separately, so this will keep on working - if sig.Verify(key, []RR{soa}) != nil { - t.Error("failure to validate") - } -} - -func TestSignature(t *testing.T) { - sig := new(RRSIG) - sig.Hdr.Name = "miek.nl." - sig.Hdr.Class = ClassINET - sig.Hdr.Ttl = 3600 - sig.TypeCovered = TypeDNSKEY - sig.Algorithm = RSASHA1 - sig.Labels = 2 - sig.OrigTtl = 4000 - sig.Expiration = 1000 //Thu Jan 1 02:06:40 CET 1970 - sig.Inception = 800 //Thu Jan 1 01:13:20 CET 1970 - sig.KeyTag = 34641 - sig.SignerName = "miek.nl." 
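// --- Illustrative sketch (not part of the vendored tests) ----------------
// TestSignVerify below shows the full DNSSEC flow of this package: generate
// a DNSKEY, fill in an RRSIG, Sign an RRset, then Verify it. A condensed,
// self-contained sketch of the same flow; the key size and the validity
// window are illustrative choices, not values from the patch.
package main

import (
	"crypto/rsa"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 14400},
		Flags:     256,
		Protocol:  3,
		Algorithm: dns.RSASHA256,
	}
	priv, err := key.Generate(1024) // returns crypto.PrivateKey; an *rsa.PrivateKey here
	if err != nil {
		log.Fatal(err)
	}

	soa, err := dns.NewRR("miek.nl. 14400 IN SOA open.nlnetlabs.nl. miekg.atoom.net. 1293945905 14400 3600 604800 86400")
	if err != nil {
		log.Fatal(err)
	}

	sig := &dns.RRSIG{
		Hdr:         dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 14400},
		TypeCovered: dns.TypeSOA,
		Algorithm:   dns.RSASHA256,
		Labels:      uint8(dns.CountLabel("miek.nl.")),
		OrigTtl:     14400,
		Expiration:  uint32(time.Now().Add(24 * time.Hour).Unix()),
		Inception:   uint32(time.Now().Add(-time.Hour).Unix()),
		KeyTag:      key.KeyTag(),
		SignerName:  key.Hdr.Name,
	}
	if err := sig.Sign(priv.(*rsa.PrivateKey), []dns.RR{soa}); err != nil {
		log.Fatal(err)
	}
	if err := sig.Verify(key, []dns.RR{soa}); err != nil {
		log.Fatal(err)
	}
	log.Println("signed and verified", soa.Header().Name)
}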
- sig.Signature = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" - - // Should not be valid - if sig.ValidityPeriod(time.Now()) { - t.Error("should not be valid") - } - - sig.Inception = 315565800 //Tue Jan 1 10:10:00 CET 1980 - sig.Expiration = 4102477800 //Fri Jan 1 10:10:00 CET 2100 - if !sig.ValidityPeriod(time.Now()) { - t.Error("should be valid") - } -} - -func TestSignVerify(t *testing.T) { - // The record we want to sign - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - soa1 := new(SOA) - soa1.Hdr = RR_Header{"*.miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa1.Ns = "open.nlnetlabs.nl." - soa1.Mbox = "miekg.atoom.net." - soa1.Serial = 1293945905 - soa1.Refresh = 14400 - soa1.Retry = 3600 - soa1.Expire = 604800 - soa1.Minttl = 86400 - - srv := new(SRV) - srv.Hdr = RR_Header{"srv.miek.nl.", TypeSRV, ClassINET, 14400, 0} - srv.Port = 1000 - srv.Weight = 800 - srv.Target = "web1.miek.nl." - - hinfo := &HINFO{ - Hdr: RR_Header{ - Name: "miek.nl.", - Rrtype: TypeHINFO, - Class: ClassINET, - Ttl: 3789, - }, - Cpu: "X", - Os: "Y", - } - - // With this key - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(512) - - // Fill in the values of the Sig, before signing - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = soa.Hdr.Rrtype - sig.Labels = uint8(CountLabel(soa.Hdr.Name)) // works for all 3 - sig.OrigTtl = soa.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() // Get the keyfrom the Key - sig.SignerName = key.Hdr.Name - sig.Algorithm = RSASHA256 - - for _, r := range []RR{soa, soa1, srv, hinfo} { - if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{r}); err != nil { - t.Error("failure to sign the record:", err) - continue - } - if err := sig.Verify(key, []RR{r}); err != nil { - t.Error("failure to validate") - continue - } - t.Logf("validated: %s", r.Header().Name) - } -} - -func Test65534(t *testing.T) { - t6 := new(RFC3597) - t6.Hdr = RR_Header{"miek.nl.", 65534, ClassINET, 14400, 0} - t6.Rdata = "505D870001" - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." 
- key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(1024) - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = t6.Hdr.Rrtype - sig.Labels = uint8(CountLabel(t6.Hdr.Name)) - sig.OrigTtl = t6.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() - sig.SignerName = key.Hdr.Name - sig.Algorithm = RSASHA256 - if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{t6}); err != nil { - t.Error(err) - t.Error("failure to sign the TYPE65534 record") - } - if err := sig.Verify(key, []RR{t6}); err != nil { - t.Error(err) - t.Error("failure to validate") - } else { - t.Logf("validated: %s", t6.Header().Name) - } -} - -func TestDnskey(t *testing.T) { - pubkey, err := ReadRR(strings.NewReader(` -miek.nl. IN DNSKEY 256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b} -`), "Kmiek.nl.+010+05240.key") - if err != nil { - t.Fatal(err) - } - privStr := `Private-key-format: v1.3 -Algorithm: 10 (RSASHA512) -Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs= -PublicExponent: AQAB -PrivateExponent: UfCoIQ/Z38l8vB6SSqOI/feGjHEl/fxIPX4euKf0D/32k30fHbSaNFrFOuIFmWMB3LimWVEs6u3dpbB9CQeCVg7hwU5puG7OtuiZJgDAhNeOnxvo5btp4XzPZrJSxR4WNQnwIiYWbl0aFlL1VGgHC/3By89ENZyWaZcMLW4KGWE= -Prime1: yxwC6ogAu8aVcDx2wg1V0b5M5P6jP8qkRFVMxWNTw60Vkn+ECvw6YAZZBHZPaMyRYZLzPgUlyYRd0cjupy4+fQ== -Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6ZeC3bcqOCqZhz+pw== -Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ== -Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw== -Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ== -` - privkey, err := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(privStr), - "Kmiek.nl.+010+05240.private") - if err != nil { - t.Fatal(err) - } - if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" { - t.Error("pubkey is not what we've read") - } - if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr { - t.Error("privkey is not what we've read") - t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey)) - } -} - -func TestTag(t *testing.T) { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - tag := key.KeyTag() - if tag != 12051 { - t.Errorf("wrong key tag: %d for key %v", tag, key) - } -} - -func TestKeyRSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." 
- key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - priv, _ := key.Generate(2048) - - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = TypeSOA - sig.Algorithm = RSASHA256 - sig.Labels = 2 - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.OrigTtl = soa.Hdr.Ttl - sig.KeyTag = key.KeyTag() - sig.SignerName = key.Hdr.Name - - if err := sig.Sign(priv.(*rsa.PrivateKey), []RR{soa}); err != nil { - t.Error("failed to sign") - return - } - if err := sig.Verify(key, []RR{soa}); err != nil { - t.Error("failed to verify") - } -} - -func TestKeyToDS(t *testing.T) { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - ds := key.ToDS(SHA1) - if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" { - t.Errorf("wrong DS digest for SHA1\n%v", ds) - } -} - -func TestSignRSA(t *testing.T) { - pub := "miek.nl. IN DNSKEY 256 3 5 AwEAAb+8lGNCxJgLS8rYVer6EnHVuIkQDghdjdtewDzU3G5R7PbMbKVRvH2Ma7pQyYceoaqWZQirSj72euPWfPxQnMy9ucCylA+FuH9cSjIcPf4PqJfdupHk9X6EBYjxrCLY4p1/yBwgyBIRJtZtAqM3ceAH2WovEJD6rTtOuHo5AluJ" - - priv := `Private-key-format: v1.3 -Algorithm: 5 (RSASHA1) -Modulus: v7yUY0LEmAtLythV6voScdW4iRAOCF2N217APNTcblHs9sxspVG8fYxrulDJhx6hqpZlCKtKPvZ649Z8/FCczL25wLKUD4W4f1xKMhw9/g+ol926keT1foQFiPGsItjinX/IHCDIEhEm1m0Cozdx4AfZai8QkPqtO064ejkCW4k= -PublicExponent: AQAB -PrivateExponent: YPwEmwjk5HuiROKU4xzHQ6l1hG8Iiha4cKRG3P5W2b66/EN/GUh07ZSf0UiYB67o257jUDVEgwCuPJz776zfApcCB4oGV+YDyEu7Hp/rL8KcSN0la0k2r9scKwxTp4BTJT23zyBFXsV/1wRDK1A5NxsHPDMYi2SoK63Enm/1ptk= -Prime1: /wjOG+fD0ybNoSRn7nQ79udGeR1b0YhUA5mNjDx/x2fxtIXzygYk0Rhx9QFfDy6LOBvz92gbNQlzCLz3DJt5hw== -Prime2: wHZsJ8OGhkp5p3mrJFZXMDc2mbYusDVTA+t+iRPdS797Tj0pjvU2HN4vTnTj8KBQp6hmnY7dLp9Y1qserySGbw== -Exponent1: N0A7FsSRIg+IAN8YPQqlawoTtG1t1OkJ+nWrurPootScApX6iMvn8fyvw3p2k51rv84efnzpWAYiC8SUaQDNxQ== -Exponent2: SvuYRaGyvo0zemE3oS+WRm2scxR8eiA8WJGeOc+obwOKCcBgeZblXzfdHGcEC1KaOcetOwNW/vwMA46lpLzJNw== -Coefficient: 8+7ZN/JgByqv0NfULiFKTjtyegUcijRuyij7yNxYbCBneDvZGxJwKNi4YYXWx743pcAj4Oi4Oh86gcmxLs+hGw== -Created: 20110302104537 -Publish: 20110302104537 -Activate: 20110302104537` - - xk, _ := NewRR(pub) - k := xk.(*DNSKEY) - p, err := k.NewPrivateKey(priv) - if err != nil { - t.Error(err) - } - switch priv := p.(type) { - case *rsa.PrivateKey: - if 65537 != priv.PublicKey.E { - t.Error("exponenent should be 65537") - } - default: - t.Errorf("we should have read an RSA key: %v", priv) - } - if k.KeyTag() != 37350 { - t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k) - } - - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." 
- soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = k.KeyTag() - sig.SignerName = k.Hdr.Name - sig.Algorithm = k.Algorithm - - sig.Sign(p.(*rsa.PrivateKey), []RR{soa}) - if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" { - t.Errorf("signature is not correct: %v", sig) - } -} - -func TestSignVerifyECDSA(t *testing.T) { - pub := `example.net. 3600 IN DNSKEY 257 3 14 ( - xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 - w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 - /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` - priv := `Private-key-format: v1.2 -Algorithm: 14 (ECDSAP384SHA384) -PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - - eckey, err := NewRR(pub) - if err != nil { - t.Fatal(err) - } - privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv) - if err != nil { - t.Fatal(err) - } - // TODO: Create separate test for this - ds := eckey.(*DNSKEY).ToDS(SHA384) - if ds.KeyTag != 10771 { - t.Fatal("wrong keytag on DS") - } - if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" { - t.Fatal("wrong DS Digest") - } - a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1") - sig := new(RRSIG) - sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0} - sig.Expiration, _ = StringToTime("20100909102025") - sig.Inception, _ = StringToTime("20100812102025") - sig.KeyTag = eckey.(*DNSKEY).KeyTag() - sig.SignerName = eckey.(*DNSKEY).Hdr.Name - sig.Algorithm = eckey.(*DNSKEY).Algorithm - - if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{a}) != nil { - t.Fatal("failure to sign the record") - } - - if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil { - t.Fatalf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", - eckey.(*DNSKEY).String(), - a.String(), - sig.String(), - eckey.(*DNSKEY).PrivateKeyString(privkey), - err, - ) - } -} - -func TestSignVerifyECDSA2(t *testing.T) { - srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.") - if err != nil { - t.Fatal(err) - } - srv := srv1.(*SRV) - - // With this key - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." 
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = ECDSAP256SHA256 - privkey, err := key.Generate(256) - if err != nil { - t.Fatal("failure to generate key") - } - - // Fill in the values of the Sig, before signing - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = srv.Hdr.Rrtype - sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3 - sig.OrigTtl = srv.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() // Get the keyfrom the Key - sig.SignerName = key.Hdr.Name - sig.Algorithm = ECDSAP256SHA256 - - if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{srv}) != nil { - t.Fatal("failure to sign the record") - } - - err = sig.Verify(key, []RR{srv}) - if err != nil { - t.Logf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", - key.String(), - srv.String(), - sig.String(), - key.PrivateKeyString(privkey), - err, - ) - } -} - -// Here the test vectors from the relevant RFCs are checked. -// rfc6605 6.1 -func TestRFC6605P256(t *testing.T) { - exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 13 ( - GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb - krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )` - exPriv := `Private-key-format: v1.2 -Algorithm: 13 (ECDSAP256SHA256) -PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.net. 3600 IN DS 55648 13 2 ( - b4c8c1fe2e7477127b27115656ad6256f424625bf5c1 - e2770ce6d6e37df61d17 )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exA := `www.example.net. 3600 IN A 192.0.2.1` - exRRSIG := `www.example.net. 3600 IN RRSIG A 13 3 3600 ( - 20100909100439 20100812100439 55648 example.net. - qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA - yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )` - rrA, err := NewRR(exA) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrA.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20100909100439") - ourRRSIG.Inception, _ = StringToTime("20100812100439") - err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate our RRSIG: %v", err) - } - - // Signatures are randomized - rrRRSIG.(*RRSIG).Signature = "" - ourRRSIG.Signature = "" - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} - -// rfc6605 6.2 -func TestRFC6605P384(t *testing.T) { - exDNSKEY := `example.net. 
3600 IN DNSKEY 257 3 14 ( - xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 - w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 - /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` - exPriv := `Private-key-format: v1.2 -Algorithm: 14 (ECDSAP384SHA384) -PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.net. 3600 IN DS 10771 14 4 ( - 72d7b62976ce06438e9c0bf319013cf801f09ecc84b8 - d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94 - 6df983d6 )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exA := `www.example.net. 3600 IN A 192.0.2.1` - exRRSIG := `www.example.net. 3600 IN RRSIG A 14 3 3600 ( - 20100909102025 20100812102025 10771 example.net. - /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP - 95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz - WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )` - rrA, err := NewRR(exA) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrA.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20100909102025") - ourRRSIG.Inception, _ = StringToTime("20100812102025") - err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("failure to validate our RRSIG: %v", err) - } - - // Signatures are randomized - rrRRSIG.(*RRSIG).Signature = "" - ourRRSIG.Signature = "" - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} - -func TestInvalidRRSet(t *testing.T) { - goodRecords := make([]RR, 2) - goodRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - goodRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}} - - // Generate key - keyname := "cloudflare.com." 
- key := &DNSKEY{ - Hdr: RR_Header{Name: keyname, Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 0}, - Algorithm: ECDSAP256SHA256, - Flags: ZONE, - Protocol: 3, - } - privatekey, err := key.Generate(256) - if err != nil { - t.Fatal(err.Error()) - } - - // Need to fill in: Inception, Expiration, KeyTag, SignerName and Algorithm - curTime := time.Now() - signature := &RRSIG{ - Inception: uint32(curTime.Unix()), - Expiration: uint32(curTime.Add(time.Hour).Unix()), - KeyTag: key.KeyTag(), - SignerName: keyname, - Algorithm: ECDSAP256SHA256, - } - - // Inconsistent name between records - badRecords := make([]RR, 2) - badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - badRecords[1] = &TXT{Hdr: RR_Header{Name: "nama.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}} - - if IsRRset(badRecords) { - t.Fatal("Record set with inconsistent names considered valid") - } - - badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - badRecords[1] = &A{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeA, Class: ClassINET, Ttl: 0}} - - if IsRRset(badRecords) { - t.Fatal("Record set with inconsistent record types considered valid") - } - - badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - badRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassCHAOS, Ttl: 0}, Txt: []string{"_o/"}} - - if IsRRset(badRecords) { - t.Fatal("Record set with inconsistent record class considered valid") - } - - // Sign the good record set and then make sure verification fails on the bad record set - if err := signature.Sign(privatekey.(crypto.Signer), goodRecords); err != nil { - t.Fatal("Signing good records failed") - } - - if err := signature.Verify(key, badRecords); err != ErrRRset { - t.Fatal("Verification did not return ErrRRset with inconsistent records") - } -} diff --git a/vendor/github.com/miekg/dns/dnsutil/util.go b/vendor/github.com/miekg/dns/dnsutil/util.go deleted file mode 100644 index 9ed03f29..00000000 --- a/vendor/github.com/miekg/dns/dnsutil/util.go +++ /dev/null @@ -1,79 +0,0 @@ -// Package dnsutil contains higher-level methods useful with the dns -// package. While package dns implements the DNS protocols itself, -// these functions are related but not directly required for protocol -// processing. They are often useful in preparing input/output of the -// functions in package dns. -package dnsutil - -import ( - "strings" - - "github.com/miekg/dns" -) - -// AddDomain adds origin to s if s is not already a FQDN. -// Note that the result may not be a FQDN. If origin does not end -// with a ".", the result won't either. -// This implements the zonefile convention (specified in RFC 1035, -// Section "5.1. Format") that "@" represents the -// apex (bare) domain. i.e. AddOrigin("@", "foo.com.") returns "foo.com.". -func AddOrigin(s, origin string) string { - // ("foo.", "origin.") -> "foo." (already a FQDN) - // ("foo", "origin.") -> "foo.origin." - // ("foo"), "origin" -> "foo.origin" - // ("@", "origin.") -> "origin." (@ represents the apex (bare) domain) - // ("", "origin.") -> "origin." (not obvious) - // ("foo", "") -> "foo" (not obvious) - - if dns.IsFqdn(s) { - return s // s is already a FQDN, no need to mess with it. 
- } - if len(origin) == 0 { - return s // Nothing to append. - } - if s == "@" || len(s) == 0 { - return origin // Expand apex. - } - - if origin == "." { - return s + origin // AddOrigin(s, ".") is an expensive way to add a ".". - } - - return s + "." + origin // The simple case. -} - -// TrimDomainName trims origin from s if s is a subdomain. -// This function will never return "", but returns "@" instead (@ represents the apex (bare) domain). -func TrimDomainName(s, origin string) string { - // An apex (bare) domain is always returned as "@". - // If the return value ends in a ".", the domain was not the suffix. - // origin can end in "." or not. Either way the results should be the same. - - if len(s) == 0 { - return "@" // Return the apex (@) rather than "". - } - // Someone is using TrimDomainName(s, ".") to remove a dot if it exists. - if origin == "." { - return strings.TrimSuffix(s, origin) - } - - // Dude, you aren't even if the right subdomain! - if !dns.IsSubDomain(origin, s) { - return s - } - - slabels := dns.Split(s) - olabels := dns.Split(origin) - m := dns.CompareDomainName(s, origin) - if len(olabels) == m { - if len(olabels) == len(slabels) { - return "@" // origin == s - } - if (s[0] == '.') && (len(slabels) == (len(olabels) + 1)) { - return "@" // TrimDomainName(".foo.", "foo.") - } - } - - // Return the first (len-m) labels: - return s[:slabels[len(slabels)-m]-1] -} diff --git a/vendor/github.com/miekg/dns/dnsutil/util_test.go b/vendor/github.com/miekg/dns/dnsutil/util_test.go deleted file mode 100644 index 0f1ecec8..00000000 --- a/vendor/github.com/miekg/dns/dnsutil/util_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package dnsutil - -import "testing" - -func TestAddOrigin(t *testing.T) { - var tests = []struct{ e1, e2, expected string }{ - {"@", "example.com", "example.com"}, - {"foo", "example.com", "foo.example.com"}, - {"foo.", "example.com", "foo."}, - {"@", "example.com.", "example.com."}, - {"foo", "example.com.", "foo.example.com."}, - {"foo.", "example.com.", "foo."}, - // Oddball tests: - // In general origin should not be "" or "." but at least - // these tests verify we don't crash and will keep results - // from changing unexpectedly. - {"*.", "", "*."}, - {"@", "", "@"}, - {"foobar", "", "foobar"}, - {"foobar.", "", "foobar."}, - {"*.", ".", "*."}, - {"@", ".", "."}, - {"foobar", ".", "foobar."}, - {"foobar.", ".", "foobar."}, - } - for _, test := range tests { - actual := AddOrigin(test.e1, test.e2) - if test.expected != actual { - t.Errorf("AddOrigin(%#v, %#v) expected %#v, go %#v\n", test.e1, test.e2, test.expected, actual) - } - } -} - -func TestTrimDomainName(t *testing.T) { - - // Basic tests. - // Try trimming "example.com" and "example.com." from typical use cases. 
- var tests_examplecom = []struct{ experiment, expected string }{ - {"foo.example.com", "foo"}, - {"foo.example.com.", "foo"}, - {".foo.example.com", ".foo"}, - {".foo.example.com.", ".foo"}, - {"*.example.com", "*"}, - {"example.com", "@"}, - {"example.com.", "@"}, - {"com.", "com."}, - {"foo.", "foo."}, - {"serverfault.com.", "serverfault.com."}, - {"serverfault.com", "serverfault.com"}, - {".foo.ronco.com", ".foo.ronco.com"}, - {".foo.ronco.com.", ".foo.ronco.com."}, - } - for _, dom := range []string{"example.com", "example.com."} { - for i, test := range tests_examplecom { - actual := TrimDomainName(test.experiment, dom) - if test.expected != actual { - t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual) - } - } - } - - // Paranoid tests. - // These test shouldn't be needed but I was weary of off-by-one errors. - // In theory, these can't happen because there are no single-letter TLDs, - // but it is good to exercize the code this way. - var tests = []struct{ experiment, expected string }{ - {"", "@"}, - {".", "."}, - {"a.b.c.d.e.f.", "a.b.c.d.e"}, - {"b.c.d.e.f.", "b.c.d.e"}, - {"c.d.e.f.", "c.d.e"}, - {"d.e.f.", "d.e"}, - {"e.f.", "e"}, - {"f.", "@"}, - {".a.b.c.d.e.f.", ".a.b.c.d.e"}, - {".b.c.d.e.f.", ".b.c.d.e"}, - {".c.d.e.f.", ".c.d.e"}, - {".d.e.f.", ".d.e"}, - {".e.f.", ".e"}, - {".f.", "@"}, - {"a.b.c.d.e.f", "a.b.c.d.e"}, - {"a.b.c.d.e.", "a.b.c.d.e."}, - {"a.b.c.d.e", "a.b.c.d.e"}, - {"a.b.c.d.", "a.b.c.d."}, - {"a.b.c.d", "a.b.c.d"}, - {"a.b.c.", "a.b.c."}, - {"a.b.c", "a.b.c"}, - {"a.b.", "a.b."}, - {"a.b", "a.b"}, - {"a.", "a."}, - {"a", "a"}, - {".a.b.c.d.e.f", ".a.b.c.d.e"}, - {".a.b.c.d.e.", ".a.b.c.d.e."}, - {".a.b.c.d.e", ".a.b.c.d.e"}, - {".a.b.c.d.", ".a.b.c.d."}, - {".a.b.c.d", ".a.b.c.d"}, - {".a.b.c.", ".a.b.c."}, - {".a.b.c", ".a.b.c"}, - {".a.b.", ".a.b."}, - {".a.b", ".a.b"}, - {".a.", ".a."}, - {".a", ".a"}, - } - for _, dom := range []string{"f", "f."} { - for i, test := range tests { - actual := TrimDomainName(test.experiment, dom) - if test.expected != actual { - t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual) - } - } - } - - // Test cases for bugs found in the wild. - // These test cases provide both origin, s, and the expected result. - // If you find a bug in the while, this is probably the easiest place - // to add it as a test case. - var tests_wild = []struct{ e1, e2, expected string }{ - {"mathoverflow.net.", ".", "mathoverflow.net"}, - {"mathoverflow.net", ".", "mathoverflow.net"}, - {"", ".", "@"}, - {"@", ".", "@"}, - } - for i, test := range tests_wild { - actual := TrimDomainName(test.e1, test.e2) - if test.expected != actual { - t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.e1, test.e2, test.expected, actual) - } - } - -} diff --git a/vendor/github.com/miekg/dns/dyn_test.go b/vendor/github.com/miekg/dns/dyn_test.go deleted file mode 100644 index 09986a5e..00000000 --- a/vendor/github.com/miekg/dns/dyn_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package dns - -// Find better solution diff --git a/vendor/github.com/miekg/dns/edns_test.go b/vendor/github.com/miekg/dns/edns_test.go deleted file mode 100644 index c290b0c8..00000000 --- a/vendor/github.com/miekg/dns/edns_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package dns - -import "testing" - -func TestOPTTtl(t *testing.T) { - e := &OPT{} - e.Hdr.Name = "." 
- e.Hdr.Rrtype = TypeOPT - - // verify the default setting of DO=0 - if e.Do() { - t.Errorf("DO bit should be zero") - } - - // There are 6 possible invocations of SetDo(): - // - // 1. Starting with DO=0, using SetDo() - // 2. Starting with DO=0, using SetDo(true) - // 3. Starting with DO=0, using SetDo(false) - // 4. Starting with DO=1, using SetDo() - // 5. Starting with DO=1, using SetDo(true) - // 6. Starting with DO=1, using SetDo(false) - - // verify that invoking SetDo() sets DO=1 (TEST #1) - e.SetDo() - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - // verify that using SetDo(true) works when DO=1 (TEST #5) - e.SetDo(true) - if !e.Do() { - t.Errorf("DO bit should still be non-zero") - } - // verify that we can use SetDo(false) to set DO=0 (TEST #6) - e.SetDo(false) - if e.Do() { - t.Errorf("DO bit should be zero") - } - // verify that if we call SetDo(false) when DO=0 that it is unchanged (TEST #3) - e.SetDo(false) - if e.Do() { - t.Errorf("DO bit should still be zero") - } - // verify that using SetDo(true) works for DO=0 (TEST #2) - e.SetDo(true) - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - // verify that using SetDo() works for DO=1 (TEST #4) - e.SetDo() - if !e.Do() { - t.Errorf("DO bit should be non-zero") - } - - if e.Version() != 0 { - t.Errorf("version should be non-zero") - } - - e.SetVersion(42) - if e.Version() != 42 { - t.Errorf("set 42, expected %d, got %d", 42, e.Version()) - } - - e.SetExtendedRcode(42) - if e.ExtendedRcode() != 42 { - t.Errorf("set 42, expected %d, got %d", 42-15, e.ExtendedRcode()) - } -} diff --git a/vendor/github.com/miekg/dns/example_test.go b/vendor/github.com/miekg/dns/example_test.go deleted file mode 100644 index 64c14962..00000000 --- a/vendor/github.com/miekg/dns/example_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package dns_test - -import ( - "errors" - "fmt" - "log" - "net" - - "github.com/miekg/dns" -) - -// Retrieve the MX records for miek.nl. -func ExampleMX() { - config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") - c := new(dns.Client) - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - m.RecursionDesired = true - r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) - if err != nil { - return - } - if r.Rcode != dns.RcodeSuccess { - return - } - for _, a := range r.Answer { - if mx, ok := a.(*dns.MX); ok { - fmt.Printf("%s\n", mx.String()) - } - } -} - -// Retrieve the DNSKEY records of a zone and convert them -// to DS records for SHA1, SHA256 and SHA384. 
-func ExampleDS() { - config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") - c := new(dns.Client) - m := new(dns.Msg) - zone := "miek.nl" - m.SetQuestion(dns.Fqdn(zone), dns.TypeDNSKEY) - m.SetEdns0(4096, true) - r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) - if err != nil { - return - } - if r.Rcode != dns.RcodeSuccess { - return - } - for _, k := range r.Answer { - if key, ok := k.(*dns.DNSKEY); ok { - for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} { - fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags) - } - } - } -} - -const TypeAPAIR = 0x0F99 - -type APAIR struct { - addr [2]net.IP -} - -func NewAPAIR() dns.PrivateRdata { return new(APAIR) } - -func (rd *APAIR) String() string { return rd.addr[0].String() + " " + rd.addr[1].String() } -func (rd *APAIR) Parse(txt []string) error { - if len(txt) != 2 { - return errors.New("two addresses required for APAIR") - } - for i, s := range txt { - ip := net.ParseIP(s) - if ip == nil { - return errors.New("invalid IP in APAIR text representation") - } - rd.addr[i] = ip - } - return nil -} - -func (rd *APAIR) Pack(buf []byte) (int, error) { - b := append([]byte(rd.addr[0]), []byte(rd.addr[1])...) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *APAIR) Unpack(buf []byte) (int, error) { - ln := net.IPv4len * 2 - if len(buf) != ln { - return 0, errors.New("invalid length of APAIR rdata") - } - cp := make([]byte, ln) - copy(cp, buf) // clone bytes to use them in IPs - - rd.addr[0] = net.IP(cp[:3]) - rd.addr[1] = net.IP(cp[4:]) - - return len(buf), nil -} - -func (rd *APAIR) Copy(dest dns.PrivateRdata) error { - cp := make([]byte, rd.Len()) - _, err := rd.Pack(cp) - if err != nil { - return err - } - - d := dest.(*APAIR) - d.addr[0] = net.IP(cp[:3]) - d.addr[1] = net.IP(cp[4:]) - return nil -} - -func (rd *APAIR) Len() int { - return net.IPv4len * 2 -} - -func ExamplePrivateHandle() { - dns.PrivateHandle("APAIR", TypeAPAIR, NewAPAIR) - defer dns.PrivateHandleRemove(TypeAPAIR) - - rr, err := dns.NewRR("miek.nl. APAIR (1.2.3.4 1.2.3.5)") - if err != nil { - log.Fatal("could not parse APAIR record: ", err) - } - fmt.Println(rr) - // Output: miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5 - - m := new(dns.Msg) - m.Id = 12345 - m.SetQuestion("miek.nl.", TypeAPAIR) - m.Answer = append(m.Answer, rr) - - fmt.Println(m) - // ;; opcode: QUERY, status: NOERROR, id: 12345 - // ;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 - // - // ;; QUESTION SECTION: - // ;miek.nl. IN APAIR - // - // ;; ANSWER SECTION: - // miek.nl. 
3600 IN APAIR 1.2.3.4 1.2.3.5 -} diff --git a/vendor/github.com/miekg/dns/fuzz_test.go b/vendor/github.com/miekg/dns/fuzz_test.go deleted file mode 100644 index 25586973..00000000 --- a/vendor/github.com/miekg/dns/fuzz_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package dns - -import "testing" - -func TestFuzzString(t *testing.T) { - testcases := []string{"", " MINFO ", " RP ", " NSEC 0 0", " \" NSEC 0 0\"", " \" MINFO \"", - ";a ", ";a����������", - " NSAP O ", " NSAP N ", - " TYPE4 TYPE6a789a3bc0045c8a5fb42c7d1bd998f5444 IN 9579b47d46817afbd17273e6", - " TYPE45 3 3 4147994 TYPE\\(\\)\\)\\(\\)\\(\\(\\)\\(\\)\\)\\)\\(\\)\\(\\)\\(\\(\\R 948\"\")\\(\\)\\)\\)\\(\\ ", - "$GENERATE 0-3 ${441189,5039418474430,o}", - "$INCLUDE 00 TYPE00000000000n ", - "$INCLUDE PE4 TYPE061463623/727071511 \\(\\)\\$GENERATE 6-462/0", - } - for i, tc := range testcases { - rr, err := NewRR(tc) - if err == nil { - // rr can be nil because we can (for instance) just parse a comment - if rr == nil { - continue - } - t.Fatalf("parsed mailformed RR %d: %s", i, rr.String()) - } - } -} diff --git a/vendor/github.com/miekg/dns/idn/code_points.go b/vendor/github.com/miekg/dns/idn/code_points.go deleted file mode 100644 index 129c3742..00000000 --- a/vendor/github.com/miekg/dns/idn/code_points.go +++ /dev/null @@ -1,2346 +0,0 @@ -package idn - -const ( - propertyUnknown property = iota // unknown character property - propertyPVALID // allowed to be used in IDNs - propertyCONTEXTJ // invisible or problematic characters (join controls) - propertyCONTEXTO // invisible or problematic characters (others) - propertyDISALLOWED // should not be included in IDNs - propertyUNASSIGNED // code points that are not designated in the Unicode Standard -) - -// property stores the property of a code point, as described in RFC 5892, -// section 1 -type property int - -// codePoints list all code points in Unicode Character Database (UCD) Format -// according to RFC 5892, appendix B.1. 
Thanks to libidn2 (GNU) - -// http://www.gnu.org/software/libidn/libidn2/ -var codePoints = []struct { - start rune - end rune - state property -}{ - {0x0000, 0x002C, propertyDISALLOWED}, // ..COMMA - {0x002D, 0x0, propertyPVALID}, // HYPHEN-MINUS - {0x002E, 0x002F, propertyDISALLOWED}, // FULL STOP..SOLIDUS - {0x0030, 0x0039, propertyPVALID}, // DIGIT ZERO..DIGIT NINE - {0x003A, 0x0060, propertyDISALLOWED}, // COLON..GRAVE ACCENT - {0x0041, 0x005A, propertyPVALID}, // LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z - {0x0061, 0x007A, propertyPVALID}, // LATIN SMALL LETTER A..LATIN SMALL LETTER Z - {0x007B, 0x00B6, propertyDISALLOWED}, // LEFT CURLY BRACKET..PILCROW SIGN - {0x00B7, 0x0, propertyCONTEXTO}, // MIDDLE DOT - {0x00B8, 0x00DE, propertyDISALLOWED}, // CEDILLA..LATIN CAPITAL LETTER THORN - {0x00DF, 0x00F6, propertyPVALID}, // LATIN SMALL LETTER SHARP S..LATIN SMALL LETT - {0x00F7, 0x0, propertyDISALLOWED}, // DIVISION SIGN - {0x00F8, 0x00FF, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE..LATIN SMAL - {0x0100, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH MACRON - {0x0101, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH MACRON - {0x0102, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE - {0x0103, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE - {0x0104, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH OGONEK - {0x0105, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH OGONEK - {0x0106, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH ACUTE - {0x0107, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH ACUTE - {0x0108, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CIRCUMFLEX - {0x0109, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CIRCUMFLEX - {0x010A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH DOT ABOVE - {0x010B, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH DOT ABOVE - {0x010C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CARON - {0x010D, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CARON - {0x010E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CARON - {0x010F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CARON - {0x0110, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH STROKE - {0x0111, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH STROKE - {0x0112, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON - {0x0113, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON - {0x0114, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH BREVE - {0x0115, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH BREVE - {0x0116, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT ABOVE - {0x0117, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT ABOVE - {0x0118, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH OGONEK - {0x0119, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH OGONEK - {0x011A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CARON - {0x011B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CARON - {0x011C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CIRCUMFLEX - {0x011D, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CIRCUMFLEX - {0x011E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH BREVE - {0x011F, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH BREVE - {0x0120, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH DOT ABOVE - {0x0121, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH DOT ABOVE - {0x0122, 0x0, propertyDISALLOWED}, // LATIN CAPITAL 
LETTER G WITH CEDILLA - {0x0123, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CEDILLA - {0x0124, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CIRCUMFLEX - {0x0125, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CIRCUMFLEX - {0x0126, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH STROKE - {0x0127, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH STROKE - {0x0128, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE - {0x0129, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE - {0x012A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH MACRON - {0x012B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH MACRON - {0x012C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH BREVE - {0x012D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH BREVE - {0x012E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH OGONEK - {0x012F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH OGONEK - {0x0130, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT ABOVE - {0x0131, 0x0, propertyPVALID}, // LATIN SMALL LETTER DOTLESS I - {0x0132, 0x0134, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE IJ..LATIN CAPITAL LET - {0x0135, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH CIRCUMFLEX - {0x0136, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CEDILLA - {0x0137, 0x0138, propertyPVALID}, // LATIN SMALL LETTER K WITH CEDILLA..LATIN SMA - {0x0139, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH ACUTE - {0x013A, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH ACUTE - {0x013B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CEDILLA - {0x013C, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CEDILLA - {0x013D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CARON - {0x013E, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CARON - {0x013F, 0x0141, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE DOT..LATI - {0x0142, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH STROKE - {0x0143, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH ACUTE - {0x0144, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH ACUTE - {0x0145, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CEDILLA - {0x0146, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CEDILLA - {0x0147, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CARON - {0x0148, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CARON - {0x0149, 0x014A, propertyDISALLOWED}, // LATIN SMALL LETTER N PRECEDED BY APOSTROPHE. 
- {0x014B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ENG - {0x014C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON - {0x014D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON - {0x014E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH BREVE - {0x014F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH BREVE - {0x0150, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE ACUTE - {0x0151, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE ACUTE - {0x0152, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE OE - {0x0153, 0x0, propertyPVALID}, // LATIN SMALL LIGATURE OE - {0x0154, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH ACUTE - {0x0155, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH ACUTE - {0x0156, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CEDILLA - {0x0157, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CEDILLA - {0x0158, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CARON - {0x0159, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CARON - {0x015A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE - {0x015B, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE - {0x015C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CIRCUMFLEX - {0x015D, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CIRCUMFLEX - {0x015E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CEDILLA - {0x015F, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CEDILLA - {0x0160, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON - {0x0161, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON - {0x0162, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CEDILLA - {0x0163, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CEDILLA - {0x0164, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CARON - {0x0165, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CARON - {0x0166, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH STROKE - {0x0167, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH STROKE - {0x0168, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE - {0x0169, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE - {0x016A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON - {0x016B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON - {0x016C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH BREVE - {0x016D, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH BREVE - {0x016E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH RING ABOVE - {0x016F, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH RING ABOVE - {0x0170, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE ACUTE - {0x0171, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE ACUTE - {0x0172, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH OGONEK - {0x0173, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH OGONEK - {0x0174, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH CIRCUMFLEX - {0x0175, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH CIRCUMFLEX - {0x0176, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH CIRCUMFLEX - {0x0177, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH CIRCUMFLEX - {0x0178, 0x0179, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DIAERESIS..LATIN - {0x017A, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH ACUTE - {0x017B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT ABOVE - {0x017C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT 
ABOVE - {0x017D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CARON - {0x017E, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CARON - {0x017F, 0x0, propertyDISALLOWED}, // LATIN SMALL LETTER LONG S - {0x0180, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH STROKE - {0x0181, 0x0182, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH HOOK..LATIN CAPI - {0x0183, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH TOPBAR - {0x0184, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE SIX - {0x0185, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE SIX - {0x0186, 0x0187, propertyDISALLOWED}, // LATIN CAPITAL LETTER OPEN O..LATIN CAPITAL L - {0x0188, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH HOOK - {0x0189, 0x018B, propertyDISALLOWED}, // LATIN CAPITAL LETTER AFRICAN D..LATIN CAPITA - {0x018C, 0x018D, propertyPVALID}, // LATIN SMALL LETTER D WITH TOPBAR..LATIN SMAL - {0x018E, 0x0191, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED E..LATIN CAPIT - {0x0192, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH HOOK - {0x0193, 0x0194, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH HOOK..LATIN CAPI - {0x0195, 0x0, propertyPVALID}, // LATIN SMALL LETTER HV - {0x0196, 0x0198, propertyDISALLOWED}, // LATIN CAPITAL LETTER IOTA..LATIN CAPITAL LET - {0x0199, 0x019B, propertyPVALID}, // LATIN SMALL LETTER K WITH HOOK..LATIN SMALL - {0x019C, 0x019D, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED M..LATIN CAPITAL - {0x019E, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LONG RIGHT LEG - {0x019F, 0x01A0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MIDDLE TILDE..LA - {0x01A1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN - {0x01A2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OI - {0x01A3, 0x0, propertyPVALID}, // LATIN SMALL LETTER OI - {0x01A4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH HOOK - {0x01A5, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH HOOK - {0x01A6, 0x01A7, propertyDISALLOWED}, // LATIN LETTER YR..LATIN CAPITAL LETTER TONE T - {0x01A8, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE TWO - {0x01A9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ESH - {0x01AA, 0x01AB, propertyPVALID}, // LATIN LETTER REVERSED ESH LOOP..LATIN SMALL - {0x01AC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH HOOK - {0x01AD, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH HOOK - {0x01AE, 0x01AF, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH RETROFLEX HOOK.. 
- {0x01B0, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN - {0x01B1, 0x01B3, propertyDISALLOWED}, // LATIN CAPITAL LETTER UPSILON..LATIN CAPITAL - {0x01B4, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK - {0x01B5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH STROKE - {0x01B6, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH STROKE - {0x01B7, 0x01B8, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH..LATIN CAPITAL LETT - {0x01B9, 0x01BB, propertyPVALID}, // LATIN SMALL LETTER EZH REVERSED..LATIN LETTE - {0x01BC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE FIVE - {0x01BD, 0x01C3, propertyPVALID}, // LATIN SMALL LETTER TONE FIVE..LATIN LETTER R - {0x01C4, 0x01CD, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ WITH CARON..LATIN CA - {0x01CE, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CARON - {0x01CF, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH CARON - {0x01D0, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH CARON - {0x01D1, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CARON - {0x01D2, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CARON - {0x01D3, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CARON - {0x01D4, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CARON - {0x01D5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND MA - {0x01D6, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND MACR - {0x01D7, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND AC - {0x01D8, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND ACUT - {0x01D9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND CA - {0x01DA, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND CARO - {0x01DB, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND GR - {0x01DC, 0x01DD, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND GRAV - {0x01DE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DIAERESIS AND MA - {0x01DF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DIAERESIS AND MACR - {0x01E0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE AND MA - {0x01E1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT ABOVE AND MACR - {0x01E2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH MACRON - {0x01E3, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH MACRON - {0x01E4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH STROKE - {0x01E5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH STROKE - {0x01E6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CARON - {0x01E7, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CARON - {0x01E8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CARON - {0x01E9, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH CARON - {0x01EA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK - {0x01EB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK - {0x01EC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK AND MACRO - {0x01ED, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK AND MACRON - {0x01EE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH WITH CARON - {0x01EF, 0x01F0, propertyPVALID}, // LATIN SMALL LETTER EZH WITH CARON..LATIN SMA - {0x01F1, 0x01F4, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ..LATIN CAPITAL LETTE - {0x01F5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH ACUTE - {0x01F6, 0x01F8, propertyDISALLOWED}, // LATIN CAPITAL LETTER HWAIR..LATIN 
[Vendor-removal hunk, condensed for readability: every line in this stretch of the patch is a deletion ("-") from a generated IDNA derived-property table inside the pruned vendor tree. Each removed entry has the form {lo, hi, property}, where property is one of propertyPVALID, propertyDISALLOWED, propertyCONTEXTO, or propertyUNASSIGNED (the derived property values defined by RFC 5892), hi == 0x0 marks a single code point rather than a range, and the trailing comment carries the column-truncated Unicode character name. The excerpt shown here covers code points from roughly U+01F9 through U+1E5A; no hand-written code changes appear in this part of the diff.]
{0x1E5B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW - {0x1E5C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW AND MA - {0x1E5D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW AND MACR - {0x1E5E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH LINE BELOW - {0x1E5F, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH LINE BELOW - {0x1E60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT ABOVE - {0x1E61, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT ABOVE - {0x1E62, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW - {0x1E63, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW - {0x1E64, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE AND DOT AB - {0x1E65, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE AND DOT ABOV - {0x1E66, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON AND DOT AB - {0x1E67, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON AND DOT ABOV - {0x1E68, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW AND DO - {0x1E69, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW AND DOT - {0x1E6A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT ABOVE - {0x1E6B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT ABOVE - {0x1E6C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT BELOW - {0x1E6D, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT BELOW - {0x1E6E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH LINE BELOW - {0x1E6F, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH LINE BELOW - {0x1E70, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW - {0x1E71, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW - {0x1E72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS BELOW - {0x1E73, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS BELOW - {0x1E74, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE BELOW - {0x1E75, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE BELOW - {0x1E76, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW - {0x1E77, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW - {0x1E78, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE AND ACUTE - {0x1E79, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE AND ACUTE - {0x1E7A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON AND DIAER - {0x1E7B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON AND DIAERES - {0x1E7C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH TILDE - {0x1E7D, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH TILDE - {0x1E7E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DOT BELOW - {0x1E7F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DOT BELOW - {0x1E80, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH GRAVE - {0x1E81, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH GRAVE - {0x1E82, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH ACUTE - {0x1E83, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH ACUTE - {0x1E84, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DIAERESIS - {0x1E85, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DIAERESIS - {0x1E86, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT ABOVE - {0x1E87, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT ABOVE - {0x1E88, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH 
DOT BELOW - {0x1E89, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT BELOW - {0x1E8A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DOT ABOVE - {0x1E8B, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DOT ABOVE - {0x1E8C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DIAERESIS - {0x1E8D, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DIAERESIS - {0x1E8E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT ABOVE - {0x1E8F, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT ABOVE - {0x1E90, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CIRCUMFLEX - {0x1E91, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CIRCUMFLEX - {0x1E92, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT BELOW - {0x1E93, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT BELOW - {0x1E94, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH LINE BELOW - {0x1E95, 0x1E99, propertyPVALID}, // LATIN SMALL LETTER Z WITH LINE BELOW..LATIN - {0x1E9A, 0x1E9B, propertyDISALLOWED}, // LATIN SMALL LETTER A WITH RIGHT HALF RING..L - {0x1E9C, 0x1E9D, propertyPVALID}, // LATIN SMALL LETTER LONG S WITH DIAGONAL STRO - {0x1E9E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SHARP S - {0x1E9F, 0x0, propertyPVALID}, // LATIN SMALL LETTER DELTA - {0x1EA0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT BELOW - {0x1EA1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT BELOW - {0x1EA2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH HOOK ABOVE - {0x1EA3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH HOOK ABOVE - {0x1EA4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND A - {0x1EA5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACU - {0x1EA6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND G - {0x1EA7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRA - {0x1EA8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND H - {0x1EA9, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOO - {0x1EAA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND T - {0x1EAB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND TIL - {0x1EAC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND D - {0x1EAD, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT - {0x1EAE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND ACUTE - {0x1EAF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND ACUTE - {0x1EB0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND GRAVE - {0x1EB1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND GRAVE - {0x1EB2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND HOOK A - {0x1EB3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND HOOK ABO - {0x1EB4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND TILDE - {0x1EB5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND TILDE - {0x1EB6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND DOT BE - {0x1EB7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND DOT BELO - {0x1EB8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT BELOW - {0x1EB9, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT BELOW - {0x1EBA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH HOOK ABOVE - {0x1EBB, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH HOOK ABOVE - 
{0x1EBC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE - {0x1EBD, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE - {0x1EBE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND A - {0x1EBF, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACU - {0x1EC0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND G - {0x1EC1, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRA - {0x1EC2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND H - {0x1EC3, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOO - {0x1EC4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND T - {0x1EC5, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND TIL - {0x1EC6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND D - {0x1EC7, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT - {0x1EC8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH HOOK ABOVE - {0x1EC9, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH HOOK ABOVE - {0x1ECA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT BELOW - {0x1ECB, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOT BELOW - {0x1ECC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT BELOW - {0x1ECD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT BELOW - {0x1ECE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HOOK ABOVE - {0x1ECF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HOOK ABOVE - {0x1ED0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND A - {0x1ED1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACU - {0x1ED2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND G - {0x1ED3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRA - {0x1ED4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND H - {0x1ED5, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOO - {0x1ED6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND T - {0x1ED7, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND TIL - {0x1ED8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND D - {0x1ED9, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT - {0x1EDA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND ACUTE - {0x1EDB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND ACUTE - {0x1EDC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND GRAVE - {0x1EDD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND GRAVE - {0x1EDE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND HOOK AB - {0x1EDF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND HOOK ABOV - {0x1EE0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND TILDE - {0x1EE1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND TILDE - {0x1EE2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND DOT BEL - {0x1EE3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND DOT BELOW - {0x1EE4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOT BELOW - {0x1EE5, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOT BELOW - {0x1EE6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HOOK ABOVE - {0x1EE7, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HOOK ABOVE - {0x1EE8, 0x0, propertyDISALLOWED}, 
// LATIN CAPITAL LETTER U WITH HORN AND ACUTE - {0x1EE9, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND ACUTE - {0x1EEA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND GRAVE - {0x1EEB, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND GRAVE - {0x1EEC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND HOOK AB - {0x1EED, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND HOOK ABOV - {0x1EEE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND TILDE - {0x1EEF, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND TILDE - {0x1EF0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND DOT BEL - {0x1EF1, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND DOT BELOW - {0x1EF2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH GRAVE - {0x1EF3, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH GRAVE - {0x1EF4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT BELOW - {0x1EF5, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT BELOW - {0x1EF6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH HOOK ABOVE - {0x1EF7, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK ABOVE - {0x1EF8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH TILDE - {0x1EF9, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH TILDE - {0x1EFA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH LL - {0x1EFB, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH LL - {0x1EFC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH V - {0x1EFD, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH V - {0x1EFE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH LOOP - {0x1EFF, 0x1F07, propertyPVALID}, // LATIN SMALL LETTER Y WITH LOOP..GREEK SMALL - {0x1F08, 0x1F0F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA WITH PSILI..GREEK - {0x1F10, 0x1F15, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH PSILI..GREEK - {0x1F16, 0x1F17, propertyUNASSIGNED}, // .. - {0x1F18, 0x1F1D, propertyDISALLOWED}, // GREEK CAPITAL LETTER EPSILON WITH PSILI..GRE - {0x1F1E, 0x1F1F, propertyUNASSIGNED}, // .. - {0x1F20, 0x1F27, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PSILI..GREEK SMA - {0x1F28, 0x1F2F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ETA WITH PSILI..GREEK C - {0x1F30, 0x1F37, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PSILI..GREEK SM - {0x1F38, 0x1F3F, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH PSILI..GREEK - {0x1F40, 0x1F45, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH PSILI..GREEK - {0x1F46, 0x1F47, propertyUNASSIGNED}, // .. - {0x1F48, 0x1F4D, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH PSILI..GRE - {0x1F4E, 0x1F4F, propertyUNASSIGNED}, // .. 
- {0x1F50, 0x1F57, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH PSILI..GREEK - {0x1F58, 0x0, propertyUNASSIGNED}, // - {0x1F59, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA - {0x1F5A, 0x0, propertyUNASSIGNED}, // - {0x1F5B, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F5C, 0x0, propertyUNASSIGNED}, // - {0x1F5D, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F5E, 0x0, propertyUNASSIGNED}, // - {0x1F5F, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F60, 0x1F67, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PSILI..GREEK S - {0x1F68, 0x1F6F, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMEGA WITH PSILI..GREEK - {0x1F70, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VARIA - {0x1F71, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH OXIA - {0x1F72, 0x0, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH VARIA - {0x1F73, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER EPSILON WITH OXIA - {0x1F74, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH VARIA - {0x1F75, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH OXIA - {0x1F76, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VARIA - {0x1F77, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH OXIA - {0x1F78, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH VARIA - {0x1F79, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMICRON WITH OXIA - {0x1F7A, 0x0, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VARIA - {0x1F7B, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH OXIA - {0x1F7C, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH VARIA - {0x1F7D, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH OXIA - {0x1F7E, 0x1F7F, propertyUNASSIGNED}, // .. - {0x1F80, 0x1FAF, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PSILI AND YPOG - {0x1FB0, 0x1FB1, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VRACHY..GREEK - {0x1FB2, 0x1FB4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH VARIA AND YPOG - {0x1FB5, 0x0, propertyUNASSIGNED}, // - {0x1FB6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI - {0x1FB7, 0x1FC4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI AN - {0x1FC5, 0x0, propertyUNASSIGNED}, // - {0x1FC6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PERISPOMENI - {0x1FC7, 0x1FCF, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH PERISPOMENI AND - {0x1FD0, 0x1FD2, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VRACHY..GREEK S - {0x1FD3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND O - {0x1FD4, 0x1FD5, propertyUNASSIGNED}, // .. - {0x1FD6, 0x1FD7, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PERISPOMENI..GR - {0x1FD8, 0x1FDB, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH VRACHY..GREEK - {0x1FDC, 0x0, propertyUNASSIGNED}, // - {0x1FDD, 0x1FDF, propertyDISALLOWED}, // GREEK DASIA AND VARIA..GREEK DASIA AND PERIS - {0x1FE0, 0x1FE2, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VRACHY..GREE - {0x1FE3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH DIALYTIKA AN - {0x1FE4, 0x1FE7, propertyPVALID}, // GREEK SMALL LETTER RHO WITH PSILI..GREEK SMA - {0x1FE8, 0x1FEF, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH VRACHY..GR - {0x1FF0, 0x1FF1, propertyUNASSIGNED}, // .. 
- {0x1FF2, 0x1FF4, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH VARIA AND YPOG - {0x1FF5, 0x0, propertyUNASSIGNED}, // - {0x1FF6, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI - {0x1FF7, 0x1FFE, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI AN - {0x1FFF, 0x0, propertyUNASSIGNED}, // - {0x2000, 0x200B, propertyDISALLOWED}, // EN QUAD..ZERO WIDTH SPACE - {0x200C, 0x200D, propertyCONTEXTJ}, // ZERO WIDTH NON-JOINER..ZERO WIDTH JOINER - {0x200E, 0x2064, propertyDISALLOWED}, // LEFT-TO-RIGHT MARK..INVISIBLE PLUS - {0x2065, 0x2069, propertyUNASSIGNED}, // .. - {0x206A, 0x2071, propertyDISALLOWED}, // INHIBIT SYMMETRIC SWAPPING..SUPERSCRIPT LATI - {0x2072, 0x2073, propertyUNASSIGNED}, // .. - {0x2074, 0x208E, propertyDISALLOWED}, // SUPERSCRIPT FOUR..SUBSCRIPT RIGHT PARENTHESI - {0x208F, 0x0, propertyUNASSIGNED}, // - {0x2090, 0x2094, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER A..LATIN SUBSCR - {0x2095, 0x209F, propertyUNASSIGNED}, // .. - {0x20A0, 0x20B8, propertyDISALLOWED}, // EURO-CURRENCY SIGN..TENGE SIGN - {0x20B9, 0x20CF, propertyUNASSIGNED}, // .. - {0x20D0, 0x20F0, propertyDISALLOWED}, // COMBINING LEFT HARPOON ABOVE..COMBINING ASTE - {0x20F1, 0x20FF, propertyUNASSIGNED}, // .. - {0x2100, 0x214D, propertyDISALLOWED}, // ACCOUNT OF..AKTIESELSKAB - {0x214E, 0x0, propertyPVALID}, // TURNED SMALL F - {0x214F, 0x2183, propertyDISALLOWED}, // SYMBOL FOR SAMARITAN SOURCE..ROMAN NUMERAL R - {0x2184, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C - {0x2185, 0x2189, propertyDISALLOWED}, // ROMAN NUMERAL SIX LATE FORM..VULGAR FRACTION - {0x218A, 0x218F, propertyUNASSIGNED}, // .. - {0x2190, 0x23E8, propertyDISALLOWED}, // LEFTWARDS ARROW..DECIMAL EXPONENT SYMBOL - {0x23E9, 0x23FF, propertyUNASSIGNED}, // .. - {0x2400, 0x2426, propertyDISALLOWED}, // SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM - {0x2427, 0x243F, propertyUNASSIGNED}, // .. - {0x2440, 0x244A, propertyDISALLOWED}, // OCR HOOK..OCR DOUBLE BACKSLASH - {0x244B, 0x245F, propertyUNASSIGNED}, // .. - {0x2460, 0x26CD, propertyDISALLOWED}, // CIRCLED DIGIT ONE..DISABLED CAR - {0x26CE, 0x0, propertyUNASSIGNED}, // - {0x26CF, 0x26E1, propertyDISALLOWED}, // PICK..RESTRICTED LEFT ENTRY-2 - {0x26E2, 0x0, propertyUNASSIGNED}, // - {0x26E3, 0x0, propertyDISALLOWED}, // HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE - {0x26E4, 0x26E7, propertyUNASSIGNED}, // .. - {0x26E8, 0x26FF, propertyDISALLOWED}, // BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZ - {0x2700, 0x0, propertyUNASSIGNED}, // - {0x2701, 0x2704, propertyDISALLOWED}, // UPPER BLADE SCISSORS..WHITE SCISSORS - {0x2705, 0x0, propertyUNASSIGNED}, // - {0x2706, 0x2709, propertyDISALLOWED}, // TELEPHONE LOCATION SIGN..ENVELOPE - {0x270A, 0x270B, propertyUNASSIGNED}, // .. - {0x270C, 0x2727, propertyDISALLOWED}, // VICTORY HAND..WHITE FOUR POINTED STAR - {0x2728, 0x0, propertyUNASSIGNED}, // - {0x2729, 0x274B, propertyDISALLOWED}, // STRESS OUTLINED WHITE STAR..HEAVY EIGHT TEAR - {0x274C, 0x0, propertyUNASSIGNED}, // - {0x274D, 0x0, propertyDISALLOWED}, // SHADOWED WHITE CIRCLE - {0x274E, 0x0, propertyUNASSIGNED}, // - {0x274F, 0x2752, propertyDISALLOWED}, // LOWER RIGHT DROP-SHADOWED WHITE SQUARE..UPPE - {0x2753, 0x2755, propertyUNASSIGNED}, // .. - {0x2756, 0x275E, propertyDISALLOWED}, // BLACK DIAMOND MINUS WHITE X..HEAVY DOUBLE CO - {0x275F, 0x2760, propertyUNASSIGNED}, // .. - {0x2761, 0x2794, propertyDISALLOWED}, // CURVED STEM PARAGRAPH SIGN ORNAMENT..HEAVY W - {0x2795, 0x2797, propertyUNASSIGNED}, // .. 
- {0x2798, 0x27AF, propertyDISALLOWED}, // HEAVY SOUTH EAST ARROW..NOTCHED LOWER RIGHT- - {0x27B0, 0x0, propertyUNASSIGNED}, // - {0x27B1, 0x27BE, propertyDISALLOWED}, // NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARD - {0x27BF, 0x0, propertyUNASSIGNED}, // - {0x27C0, 0x27CA, propertyDISALLOWED}, // THREE DIMENSIONAL ANGLE..VERTICAL BAR WITH H - {0x27CB, 0x0, propertyUNASSIGNED}, // - {0x27CC, 0x0, propertyDISALLOWED}, // LONG DIVISION - {0x27CD, 0x27CF, propertyUNASSIGNED}, // .. - {0x27D0, 0x2B4C, propertyDISALLOWED}, // WHITE DIAMOND WITH CENTRED DOT..RIGHTWARDS A - {0x2B4D, 0x2B4F, propertyUNASSIGNED}, // .. - {0x2B50, 0x2B59, propertyDISALLOWED}, // WHITE MEDIUM STAR..HEAVY CIRCLED SALTIRE - {0x2B5A, 0x2BFF, propertyUNASSIGNED}, // .. - {0x2C00, 0x2C2E, propertyDISALLOWED}, // GLAGOLITIC CAPITAL LETTER AZU..GLAGOLITIC CA - {0x2C2F, 0x0, propertyUNASSIGNED}, // - {0x2C30, 0x2C5E, propertyPVALID}, // GLAGOLITIC SMALL LETTER AZU..GLAGOLITIC SMAL - {0x2C5F, 0x0, propertyUNASSIGNED}, // - {0x2C60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOUBLE BAR - {0x2C61, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOUBLE BAR - {0x2C62, 0x2C64, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE TILDE..LA - {0x2C65, 0x2C66, propertyPVALID}, // LATIN SMALL LETTER A WITH STROKE..LATIN SMAL - {0x2C67, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DESCENDER - {0x2C68, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DESCENDER - {0x2C69, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DESCENDER - {0x2C6A, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DESCENDER - {0x2C6B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DESCENDER - {0x2C6C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DESCENDER - {0x2C6D, 0x2C70, propertyDISALLOWED}, // LATIN CAPITAL LETTER ALPHA..LATIN CAPITAL LE - {0x2C71, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH RIGHT HOOK - {0x2C72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH HOOK - {0x2C73, 0x2C74, propertyPVALID}, // LATIN SMALL LETTER W WITH HOOK..LATIN SMALL - {0x2C75, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HALF H - {0x2C76, 0x2C7B, propertyPVALID}, // LATIN SMALL LETTER HALF H..LATIN LETTER SMAL - {0x2C7C, 0x2C80, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER J..COPTIC CAPIT - {0x2C81, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ALFA - {0x2C82, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER VIDA - {0x2C83, 0x0, propertyPVALID}, // COPTIC SMALL LETTER VIDA - {0x2C84, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GAMMA - {0x2C85, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GAMMA - {0x2C86, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DALDA - {0x2C87, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DALDA - {0x2C88, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER EIE - {0x2C89, 0x0, propertyPVALID}, // COPTIC SMALL LETTER EIE - {0x2C8A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SOU - {0x2C8B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SOU - {0x2C8C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER ZATA - {0x2C8D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ZATA - {0x2C8E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HATE - {0x2C8F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HATE - {0x2C90, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER THETHE - {0x2C91, 0x0, propertyPVALID}, // COPTIC SMALL LETTER THETHE - {0x2C92, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER IAUDA - {0x2C93, 0x0, propertyPVALID}, // COPTIC SMALL LETTER IAUDA - 
{0x2C94, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KAPA - {0x2C95, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KAPA - {0x2C96, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER LAULA - {0x2C97, 0x0, propertyPVALID}, // COPTIC SMALL LETTER LAULA - {0x2C98, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER MI - {0x2C99, 0x0, propertyPVALID}, // COPTIC SMALL LETTER MI - {0x2C9A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER NI - {0x2C9B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER NI - {0x2C9C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KSI - {0x2C9D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KSI - {0x2C9E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER O - {0x2C9F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER O - {0x2CA0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PI - {0x2CA1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PI - {0x2CA2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER RO - {0x2CA3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER RO - {0x2CA4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SIMA - {0x2CA5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SIMA - {0x2CA6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER TAU - {0x2CA7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER TAU - {0x2CA8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER UA - {0x2CA9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER UA - {0x2CAA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FI - {0x2CAB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FI - {0x2CAC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHI - {0x2CAD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHI - {0x2CAE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PSI - {0x2CAF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PSI - {0x2CB0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OOU - {0x2CB1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OOU - {0x2CB2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P ALEF - {0x2CB3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P ALEF - {0x2CB4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC AIN - {0x2CB5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC AIN - {0x2CB6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE - {0x2CB7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC EIE - {0x2CB8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P KAPA - {0x2CB9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P KAPA - {0x2CBA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P NI - {0x2CBB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P NI - {0x2CBC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI - {0x2CBD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC NI - {0x2CBE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC OOU - {0x2CBF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC OOU - {0x2CC0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SAMPI - {0x2CC1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SAMPI - {0x2CC2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CROSSED SHEI - {0x2CC3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CROSSED SHEI - {0x2CC4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHEI - {0x2CC5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHEI - {0x2CC6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC ESH - {0x2CC7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC ESH - {0x2CC8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL 
LETTER AKHMIMIC KHEI - {0x2CC9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER AKHMIMIC KHEI - {0x2CCA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P HORI - {0x2CCB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P HORI - {0x2CCC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HORI - {0x2CCD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HORI - {0x2CCE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HA - {0x2CCF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HA - {0x2CD0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER L-SHAPED HA - {0x2CD1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER L-SHAPED HA - {0x2CD2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HEI - {0x2CD3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HEI - {0x2CD4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HAT - {0x2CD5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HAT - {0x2CD6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC GANGIA - {0x2CD7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC GANGIA - {0x2CD8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC DJA - {0x2CD9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC DJA - {0x2CDA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHIMA - {0x2CDB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHIMA - {0x2CDC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN SHIMA - {0x2CDD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN SHIMA - {0x2CDE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NGI - {0x2CDF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NGI - {0x2CE0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NYI - {0x2CE1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NYI - {0x2CE2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN WAU - {0x2CE3, 0x2CE4, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN WAU..COPTIC S - {0x2CE5, 0x2CEB, propertyDISALLOWED}, // COPTIC SYMBOL MI RO..COPTIC CAPITAL LETTER C - {0x2CEC, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC SHEI - {0x2CED, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA - {0x2CEE, 0x2CF1, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA..CO - {0x2CF2, 0x2CF8, propertyUNASSIGNED}, // .. - {0x2CF9, 0x2CFF, propertyDISALLOWED}, // COPTIC OLD NUBIAN FULL STOP..COPTIC MORPHOLO - {0x2D00, 0x2D25, propertyPVALID}, // GEORGIAN SMALL LETTER AN..GEORGIAN SMALL LET - {0x2D26, 0x2D2F, propertyUNASSIGNED}, // .. - {0x2D30, 0x2D65, propertyPVALID}, // TIFINAGH LETTER YA..TIFINAGH LETTER YAZZ - {0x2D66, 0x2D6E, propertyUNASSIGNED}, // .. - {0x2D6F, 0x0, propertyDISALLOWED}, // TIFINAGH MODIFIER LETTER LABIALIZATION MARK - {0x2D70, 0x2D7F, propertyUNASSIGNED}, // .. - {0x2D80, 0x2D96, propertyPVALID}, // ETHIOPIC SYLLABLE LOA..ETHIOPIC SYLLABLE GGW - {0x2D97, 0x2D9F, propertyUNASSIGNED}, // .. 
- {0x2DA0, 0x2DA6, propertyPVALID}, // ETHIOPIC SYLLABLE SSA..ETHIOPIC SYLLABLE SSO - {0x2DA7, 0x0, propertyUNASSIGNED}, // - {0x2DA8, 0x2DAE, propertyPVALID}, // ETHIOPIC SYLLABLE CCA..ETHIOPIC SYLLABLE CCO - {0x2DAF, 0x0, propertyUNASSIGNED}, // - {0x2DB0, 0x2DB6, propertyPVALID}, // ETHIOPIC SYLLABLE ZZA..ETHIOPIC SYLLABLE ZZO - {0x2DB7, 0x0, propertyUNASSIGNED}, // - {0x2DB8, 0x2DBE, propertyPVALID}, // ETHIOPIC SYLLABLE CCHA..ETHIOPIC SYLLABLE CC - {0x2DBF, 0x0, propertyUNASSIGNED}, // - {0x2DC0, 0x2DC6, propertyPVALID}, // ETHIOPIC SYLLABLE QYA..ETHIOPIC SYLLABLE QYO - {0x2DC7, 0x0, propertyUNASSIGNED}, // - {0x2DC8, 0x2DCE, propertyPVALID}, // ETHIOPIC SYLLABLE KYA..ETHIOPIC SYLLABLE KYO - {0x2DCF, 0x0, propertyUNASSIGNED}, // - {0x2DD0, 0x2DD6, propertyPVALID}, // ETHIOPIC SYLLABLE XYA..ETHIOPIC SYLLABLE XYO - {0x2DD7, 0x0, propertyUNASSIGNED}, // - {0x2DD8, 0x2DDE, propertyPVALID}, // ETHIOPIC SYLLABLE GYA..ETHIOPIC SYLLABLE GYO - {0x2DDF, 0x0, propertyUNASSIGNED}, // - {0x2DE0, 0x2DFF, propertyPVALID}, // COMBINING CYRILLIC LETTER BE..COMBINING CYRI - {0x2E00, 0x2E2E, propertyDISALLOWED}, // RIGHT ANGLE SUBSTITUTION MARKER..REVERSED QU - {0x2E2F, 0x0, propertyPVALID}, // VERTICAL TILDE - {0x2E30, 0x2E31, propertyDISALLOWED}, // RING POINT..WORD SEPARATOR MIDDLE DOT - {0x2E32, 0x2E7F, propertyUNASSIGNED}, // .. - {0x2E80, 0x2E99, propertyDISALLOWED}, // CJK RADICAL REPEAT..CJK RADICAL RAP - {0x2E9A, 0x0, propertyUNASSIGNED}, // - {0x2E9B, 0x2EF3, propertyDISALLOWED}, // CJK RADICAL CHOKE..CJK RADICAL C-SIMPLIFIED - {0x2EF4, 0x2EFF, propertyUNASSIGNED}, // .. - {0x2F00, 0x2FD5, propertyDISALLOWED}, // KANGXI RADICAL ONE..KANGXI RADICAL FLUTE - {0x2FD6, 0x2FEF, propertyUNASSIGNED}, // .. - {0x2FF0, 0x2FFB, propertyDISALLOWED}, // IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RI - {0x2FFC, 0x2FFF, propertyUNASSIGNED}, // .. - {0x3000, 0x3004, propertyDISALLOWED}, // IDEOGRAPHIC SPACE..JAPANESE INDUSTRIAL STAND - {0x3005, 0x3007, propertyPVALID}, // IDEOGRAPHIC ITERATION MARK..IDEOGRAPHIC NUMB - {0x3008, 0x3029, propertyDISALLOWED}, // LEFT ANGLE BRACKET..HANGZHOU NUMERAL NINE - {0x302A, 0x302D, propertyPVALID}, // IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENT - {0x302E, 0x303B, propertyDISALLOWED}, // HANGUL SINGLE DOT TONE MARK..VERTICAL IDEOGR - {0x303C, 0x0, propertyPVALID}, // MASU MARK - {0x303D, 0x303F, propertyDISALLOWED}, // PART ALTERNATION MARK..IDEOGRAPHIC HALF FILL - {0x3040, 0x0, propertyUNASSIGNED}, // - {0x3041, 0x3096, propertyPVALID}, // HIRAGANA LETTER SMALL A..HIRAGANA LETTER SMA - {0x3097, 0x3098, propertyUNASSIGNED}, // .. - {0x3099, 0x309A, propertyPVALID}, // COMBINING KATAKANA-HIRAGANA VOICED SOUND MAR - {0x309B, 0x309C, propertyDISALLOWED}, // KATAKANA-HIRAGANA VOICED SOUND MARK..KATAKAN - {0x309D, 0x309E, propertyPVALID}, // HIRAGANA ITERATION MARK..HIRAGANA VOICED ITE - {0x309F, 0x30A0, propertyDISALLOWED}, // HIRAGANA DIGRAPH YORI..KATAKANA-HIRAGANA DOU - {0x30A1, 0x30FA, propertyPVALID}, // KATAKANA LETTER SMALL A..KATAKANA LETTER VO - {0x30FB, 0x0, propertyCONTEXTO}, // KATAKANA MIDDLE DOT - {0x30FC, 0x30FE, propertyPVALID}, // KATAKANA-HIRAGANA PROLONGED SOUND MARK..KATA - {0x30FF, 0x0, propertyDISALLOWED}, // KATAKANA DIGRAPH KOTO - {0x3100, 0x3104, propertyUNASSIGNED}, // .. - {0x3105, 0x312D, propertyPVALID}, // BOPOMOFO LETTER B..BOPOMOFO LETTER IH - {0x312E, 0x3130, propertyUNASSIGNED}, // .. 
- {0x3131, 0x318E, propertyDISALLOWED}, // HANGUL LETTER KIYEOK..HANGUL LETTER ARAEAE - {0x318F, 0x0, propertyUNASSIGNED}, // - {0x3190, 0x319F, propertyDISALLOWED}, // IDEOGRAPHIC ANNOTATION LINKING MARK..IDEOGRA - {0x31A0, 0x31B7, propertyPVALID}, // BOPOMOFO LETTER BU..BOPOMOFO FINAL LETTER H - {0x31B8, 0x31BF, propertyUNASSIGNED}, // .. - {0x31C0, 0x31E3, propertyDISALLOWED}, // CJK STROKE T..CJK STROKE Q - {0x31E4, 0x31EF, propertyUNASSIGNED}, // .. - {0x31F0, 0x31FF, propertyPVALID}, // KATAKANA LETTER SMALL KU..KATAKANA LETTER SM - {0x3200, 0x321E, propertyDISALLOWED}, // PARENTHESIZED HANGUL KIYEOK..PARENTHESIZED K - {0x321F, 0x0, propertyUNASSIGNED}, // - {0x3220, 0x32FE, propertyDISALLOWED}, // PARENTHESIZED IDEOGRAPH ONE..CIRCLED KATAKAN - {0x32FF, 0x0, propertyUNASSIGNED}, // - {0x3300, 0x33FF, propertyDISALLOWED}, // SQUARE APAATO..SQUARE GAL - {0x3400, 0x4DB5, propertyPVALID}, // .... - {0x4DC0, 0x4DFF, propertyDISALLOWED}, // HEXAGRAM FOR THE CREATIVE HEAVEN..HEXAGRAM F - {0x4E00, 0x9FCB, propertyPVALID}, // .. - {0x9FCC, 0x9FFF, propertyUNASSIGNED}, // .. - {0xA000, 0xA48C, propertyPVALID}, // YI SYLLABLE IT..YI SYLLABLE YYR - {0xA48D, 0xA48F, propertyUNASSIGNED}, // .. - {0xA490, 0xA4C6, propertyDISALLOWED}, // YI RADICAL QOT..YI RADICAL KE - {0xA4C7, 0xA4CF, propertyUNASSIGNED}, // .. - {0xA4D0, 0xA4FD, propertyPVALID}, // LISU LETTER BA..LISU LETTER TONE MYA JEU - {0xA4FE, 0xA4FF, propertyDISALLOWED}, // LISU PUNCTUATION COMMA..LISU PUNCTUATION FUL - {0xA500, 0xA60C, propertyPVALID}, // VAI SYLLABLE EE..VAI SYLLABLE LENGTHENER - {0xA60D, 0xA60F, propertyDISALLOWED}, // VAI COMMA..VAI QUESTION MARK - {0xA610, 0xA62B, propertyPVALID}, // VAI SYLLABLE NDOLE FA..VAI SYLLABLE NDOLE DO - {0xA62C, 0xA63F, propertyUNASSIGNED}, // .. - {0xA640, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZEMLYA - {0xA641, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZEMLYA - {0xA642, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZELO - {0xA643, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZELO - {0xA644, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED DZE - {0xA645, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED DZE - {0xA646, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTA - {0xA647, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTA - {0xA648, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DJERV - {0xA649, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DJERV - {0xA64A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOGRAPH UK - {0xA64B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOGRAPH UK - {0xA64C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BROAD OMEGA - {0xA64D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BROAD OMEGA - {0xA64E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER NEUTRAL YER - {0xA64F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER NEUTRAL YER - {0xA650, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH BACK YER - {0xA651, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH BACK YER - {0xA652, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED YAT - {0xA653, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED YAT - {0xA654, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED YU - {0xA655, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED YU - {0xA656, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED A - {0xA657, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED A - {0xA658, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL 
LETTER CLOSED LITTLE YUS - {0xA659, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CLOSED LITTLE YUS - {0xA65A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BLENDED YUS - {0xA65B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BLENDED YUS - {0xA65C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITT - {0xA65D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE - {0xA65E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YN - {0xA65F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YN - {0xA660, 0xA661, propertyUNASSIGNED}, // .. - {0xA662, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT DE - {0xA663, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT DE - {0xA664, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EL - {0xA665, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EL - {0xA666, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EM - {0xA667, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EM - {0xA668, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOCULAR O - {0xA669, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOCULAR O - {0xA66A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BINOCULAR O - {0xA66B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BINOCULAR O - {0xA66C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O - {0xA66D, 0xA66F, propertyPVALID}, // CYRILLIC SMALL LETTER DOUBLE MONOCULAR O..CO - {0xA670, 0xA673, propertyDISALLOWED}, // COMBINING CYRILLIC TEN MILLIONS SIGN..SLAVON - {0xA674, 0xA67B, propertyUNASSIGNED}, // .. - {0xA67C, 0xA67D, propertyPVALID}, // COMBINING CYRILLIC KAVYKA..COMBINING CYRILLI - {0xA67E, 0x0, propertyDISALLOWED}, // CYRILLIC KAVYKA - {0xA67F, 0x0, propertyPVALID}, // CYRILLIC PAYEROK - {0xA680, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DWE - {0xA681, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DWE - {0xA682, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZWE - {0xA683, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZWE - {0xA684, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHWE - {0xA685, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHWE - {0xA686, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CCHE - {0xA687, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CCHE - {0xA688, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZZE - {0xA689, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZZE - {0xA68A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK - {0xA68B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK - {0xA68C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TWE - {0xA68D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TWE - {0xA68E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSWE - {0xA68F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSWE - {0xA690, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSSE - {0xA691, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSSE - {0xA692, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TCHE - {0xA693, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TCHE - {0xA694, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HWE - {0xA695, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HWE - {0xA696, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHWE - {0xA697, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHWE - {0xA698, 0xA69F, propertyUNASSIGNED}, // .. 
- {0xA6A0, 0xA6E5, propertyPVALID}, // BAMUM LETTER A..BAMUM LETTER KI - {0xA6E6, 0xA6EF, propertyDISALLOWED}, // BAMUM LETTER MO..BAMUM LETTER KOGHOM - {0xA6F0, 0xA6F1, propertyPVALID}, // BAMUM COMBINING MARK KOQNDON..BAMUM COMBININ - {0xA6F2, 0xA6F7, propertyDISALLOWED}, // BAMUM NJAEMLI..BAMUM QUESTION MARK - {0xA6F8, 0xA6FF, propertyUNASSIGNED}, // .. - {0xA700, 0xA716, propertyDISALLOWED}, // MODIFIER LETTER CHINESE TONE YIN PING..MODIF - {0xA717, 0xA71F, propertyPVALID}, // MODIFIER LETTER DOT VERTICAL BAR..MODIFIER L - {0xA720, 0xA722, propertyDISALLOWED}, // MODIFIER LETTER STRESS AND HIGH TONE..LATIN - {0xA723, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL ALEF - {0xA724, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EGYPTOLOGICAL AIN - {0xA725, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL AIN - {0xA726, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HENG - {0xA727, 0x0, propertyPVALID}, // LATIN SMALL LETTER HENG - {0xA728, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TZ - {0xA729, 0x0, propertyPVALID}, // LATIN SMALL LETTER TZ - {0xA72A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TRESILLO - {0xA72B, 0x0, propertyPVALID}, // LATIN SMALL LETTER TRESILLO - {0xA72C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO - {0xA72D, 0x0, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO - {0xA72E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO WITH COMMA - {0xA72F, 0xA731, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO WITH COMMA..LAT - {0xA732, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AA - {0xA733, 0x0, propertyPVALID}, // LATIN SMALL LETTER AA - {0xA734, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AO - {0xA735, 0x0, propertyPVALID}, // LATIN SMALL LETTER AO - {0xA736, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AU - {0xA737, 0x0, propertyPVALID}, // LATIN SMALL LETTER AU - {0xA738, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV - {0xA739, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV - {0xA73A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR - {0xA73B, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV WITH HORIZONTAL BAR - {0xA73C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AY - {0xA73D, 0x0, propertyPVALID}, // LATIN SMALL LETTER AY - {0xA73E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED C WITH DOT - {0xA73F, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C WITH DOT - {0xA740, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE - {0xA741, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE - {0xA742, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DIAGONAL STROKE - {0xA743, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DIAGONAL STROKE - {0xA744, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE AND DIAGO - {0xA745, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE AND DIAGONA - {0xA746, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER BROKEN L - {0xA747, 0x0, propertyPVALID}, // LATIN SMALL LETTER BROKEN L - {0xA748, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH HIGH STROKE - {0xA749, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH HIGH STROKE - {0xA74A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LONG STROKE OVER - {0xA74B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LONG STROKE OVERLA - {0xA74C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LOOP - {0xA74D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LOOP - {0xA74E, 0x0, 
propertyDISALLOWED}, // LATIN CAPITAL LETTER OO - {0xA74F, 0x0, propertyPVALID}, // LATIN SMALL LETTER OO - {0xA750, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH STROKE THROUGH D - {0xA751, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH STROKE THROUGH DES - {0xA752, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH FLOURISH - {0xA753, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH FLOURISH - {0xA754, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH SQUIRREL TAIL - {0xA755, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH SQUIRREL TAIL - {0xA756, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH STROKE THROUGH D - {0xA757, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH STROKE THROUGH DES - {0xA758, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE - {0xA759, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH DIAGONAL STROKE - {0xA75A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R ROTUNDA - {0xA75B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R ROTUNDA - {0xA75C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER RUM ROTUNDA - {0xA75D, 0x0, propertyPVALID}, // LATIN SMALL LETTER RUM ROTUNDA - {0xA75E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DIAGONAL STROKE - {0xA75F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DIAGONAL STROKE - {0xA760, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VY - {0xA761, 0x0, propertyPVALID}, // LATIN SMALL LETTER VY - {0xA762, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VISIGOTHIC Z - {0xA763, 0x0, propertyPVALID}, // LATIN SMALL LETTER VISIGOTHIC Z - {0xA764, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE - {0xA765, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE - {0xA766, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE THROU - {0xA767, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE THROUGH - {0xA768, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VEND - {0xA769, 0x0, propertyPVALID}, // LATIN SMALL LETTER VEND - {0xA76A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ET - {0xA76B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ET - {0xA76C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER IS - {0xA76D, 0x0, propertyPVALID}, // LATIN SMALL LETTER IS - {0xA76E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CON - {0xA76F, 0x0, propertyPVALID}, // LATIN SMALL LETTER CON - {0xA770, 0x0, propertyDISALLOWED}, // MODIFIER LETTER US - {0xA771, 0xA778, propertyPVALID}, // LATIN SMALL LETTER DUM..LATIN SMALL LETTER U - {0xA779, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR D - {0xA77A, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR D - {0xA77B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR F - {0xA77C, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR F - {0xA77D, 0xA77E, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR G..LATIN CAPITA - {0xA77F, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED INSULAR G - {0xA780, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED L - {0xA781, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED L - {0xA782, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR R - {0xA783, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR R - {0xA784, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR S - {0xA785, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR S - {0xA786, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR T - {0xA787, 0xA788, propertyPVALID}, // LATIN SMALL LETTER INSULAR T..MODIFIER 
LETTE - {0xA789, 0xA78B, propertyDISALLOWED}, // MODIFIER LETTER COLON..LATIN CAPITAL LETTER - {0xA78C, 0x0, propertyPVALID}, // LATIN SMALL LETTER SALTILLO - {0xA78D, 0xA7FA, propertyUNASSIGNED}, // .. - {0xA7FB, 0xA827, propertyPVALID}, // LATIN EPIGRAPHIC LETTER REVERSED F..SYLOTI N - {0xA828, 0xA82B, propertyDISALLOWED}, // SYLOTI NAGRI POETRY MARK-1..SYLOTI NAGRI POE - {0xA82C, 0xA82F, propertyUNASSIGNED}, // .. - {0xA830, 0xA839, propertyDISALLOWED}, // NORTH INDIC FRACTION ONE QUARTER..NORTH INDI - {0xA83A, 0xA83F, propertyUNASSIGNED}, // .. - {0xA840, 0xA873, propertyPVALID}, // PHAGS-PA LETTER KA..PHAGS-PA LETTER CANDRABI - {0xA874, 0xA877, propertyDISALLOWED}, // PHAGS-PA SINGLE HEAD MARK..PHAGS-PA MARK DOU - {0xA878, 0xA87F, propertyUNASSIGNED}, // .. - {0xA880, 0xA8C4, propertyPVALID}, // SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VI - {0xA8C5, 0xA8CD, propertyUNASSIGNED}, // .. - {0xA8CE, 0xA8CF, propertyDISALLOWED}, // SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA - {0xA8D0, 0xA8D9, propertyPVALID}, // SAURASHTRA DIGIT ZERO..SAURASHTRA DIGIT NINE - {0xA8DA, 0xA8DF, propertyUNASSIGNED}, // .. - {0xA8E0, 0xA8F7, propertyPVALID}, // COMBINING DEVANAGARI DIGIT ZERO..DEVANAGARI - {0xA8F8, 0xA8FA, propertyDISALLOWED}, // DEVANAGARI SIGN PUSHPIKA..DEVANAGARI CARET - {0xA8FB, 0x0, propertyPVALID}, // DEVANAGARI HEADSTROKE - {0xA8FC, 0xA8FF, propertyUNASSIGNED}, // .. - {0xA900, 0xA92D, propertyPVALID}, // KAYAH LI DIGIT ZERO..KAYAH LI TONE CALYA PLO - {0xA92E, 0xA92F, propertyDISALLOWED}, // KAYAH LI SIGN CWI..KAYAH LI SIGN SHYA - {0xA930, 0xA953, propertyPVALID}, // REJANG LETTER KA..REJANG VIRAMA - {0xA954, 0xA95E, propertyUNASSIGNED}, // .. - {0xA95F, 0xA97C, propertyDISALLOWED}, // REJANG SECTION MARK..HANGUL CHOSEONG SSANGYE - {0xA97D, 0xA97F, propertyUNASSIGNED}, // .. - {0xA980, 0xA9C0, propertyPVALID}, // JAVANESE SIGN PANYANGGA..JAVANESE PANGKON - {0xA9C1, 0xA9CD, propertyDISALLOWED}, // JAVANESE LEFT RERENGGAN..JAVANESE TURNED PAD - {0xA9CE, 0x0, propertyUNASSIGNED}, // - {0xA9CF, 0xA9D9, propertyPVALID}, // JAVANESE PANGRANGKEP..JAVANESE DIGIT NINE - {0xA9DA, 0xA9DD, propertyUNASSIGNED}, // .. - {0xA9DE, 0xA9DF, propertyDISALLOWED}, // JAVANESE PADA TIRTA TUMETES..JAVANESE PADA I - {0xA9E0, 0xA9FF, propertyUNASSIGNED}, // .. - {0xAA00, 0xAA36, propertyPVALID}, // CHAM LETTER A..CHAM CONSONANT SIGN WA - {0xAA37, 0xAA3F, propertyUNASSIGNED}, // .. - {0xAA40, 0xAA4D, propertyPVALID}, // CHAM LETTER FINAL K..CHAM CONSONANT SIGN FIN - {0xAA4E, 0xAA4F, propertyUNASSIGNED}, // .. - {0xAA50, 0xAA59, propertyPVALID}, // CHAM DIGIT ZERO..CHAM DIGIT NINE - {0xAA5A, 0xAA5B, propertyUNASSIGNED}, // .. - {0xAA5C, 0xAA5F, propertyDISALLOWED}, // CHAM PUNCTUATION SPIRAL..CHAM PUNCTUATION TR - {0xAA60, 0xAA76, propertyPVALID}, // MYANMAR LETTER KHAMTI GA..MYANMAR LOGOGRAM K - {0xAA77, 0xAA79, propertyDISALLOWED}, // MYANMAR SYMBOL AITON EXCLAMATION..MYANMAR SY - {0xAA7A, 0xAA7B, propertyPVALID}, // MYANMAR LETTER AITON RA..MYANMAR SIGN PAO KA - {0xAA7C, 0xAA7F, propertyUNASSIGNED}, // .. - {0xAA80, 0xAAC2, propertyPVALID}, // TAI VIET LETTER LOW KO..TAI VIET TONE MAI SO - {0xAAC3, 0xAADA, propertyUNASSIGNED}, // .. - {0xAADB, 0xAADD, propertyPVALID}, // TAI VIET SYMBOL KON..TAI VIET SYMBOL SAM - {0xAADE, 0xAADF, propertyDISALLOWED}, // TAI VIET SYMBOL HO HOI..TAI VIET SYMBOL KOI - {0xAAE0, 0xABBF, propertyUNASSIGNED}, // .. 
- {0xABC0, 0xABEA, propertyPVALID}, // MEETEI MAYEK LETTER KOK..MEETEI MAYEK VOWEL - {0xABEB, 0x0, propertyDISALLOWED}, // MEETEI MAYEK CHEIKHEI - {0xABEC, 0xABED, propertyPVALID}, // MEETEI MAYEK LUM IYEK..MEETEI MAYEK APUN IYE - {0xABEE, 0xABEF, propertyUNASSIGNED}, // .. - {0xABF0, 0xABF9, propertyPVALID}, // MEETEI MAYEK DIGIT ZERO..MEETEI MAYEK DIGIT - {0xABFA, 0xABFF, propertyUNASSIGNED}, // .. - {0xAC00, 0xD7A3, propertyPVALID}, // .. - {0xD7A4, 0xD7AF, propertyUNASSIGNED}, // .. - {0xD7B0, 0xD7C6, propertyDISALLOWED}, // HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARA - {0xD7C7, 0xD7CA, propertyUNASSIGNED}, // .. - {0xD7CB, 0xD7FB, propertyDISALLOWED}, // HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEO - {0xD7FC, 0xD7FF, propertyUNASSIGNED}, // .. - {0xD800, 0xFA0D, propertyDISALLOWED}, // ..CJK COMPAT - {0xFA0E, 0xFA0F, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA0E..CJK COMPAT - {0xFA10, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA10 - {0xFA11, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA11 - {0xFA12, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA12 - {0xFA13, 0xFA14, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA13..CJK COMPAT - {0xFA15, 0xFA1E, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA15..CJK COMPAT - {0xFA1F, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA1F - {0xFA20, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA20 - {0xFA21, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA21 - {0xFA22, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA22 - {0xFA23, 0xFA24, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA23..CJK COMPAT - {0xFA25, 0xFA26, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA25..CJK COMPAT - {0xFA27, 0xFA29, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA27..CJK COMPAT - {0xFA2A, 0xFA2D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA2A..CJK COMPAT - {0xFA2E, 0xFA2F, propertyUNASSIGNED}, // .. - {0xFA30, 0xFA6D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA30..CJK COMPAT - {0xFA6E, 0xFA6F, propertyUNASSIGNED}, // .. - {0xFA70, 0xFAD9, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPAT - {0xFADA, 0xFAFF, propertyUNASSIGNED}, // .. - {0xFB00, 0xFB06, propertyDISALLOWED}, // LATIN SMALL LIGATURE FF..LATIN SMALL LIGATUR - {0xFB07, 0xFB12, propertyUNASSIGNED}, // .. - {0xFB13, 0xFB17, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE MEN NOW..ARMENIAN SM - {0xFB18, 0xFB1C, propertyUNASSIGNED}, // .. - {0xFB1D, 0x0, propertyDISALLOWED}, // HEBREW LETTER YOD WITH HIRIQ - {0xFB1E, 0x0, propertyPVALID}, // HEBREW POINT JUDEO-SPANISH VARIKA - {0xFB1F, 0xFB36, propertyDISALLOWED}, // HEBREW LIGATURE YIDDISH YOD YOD PATAH..HEBRE - {0xFB37, 0x0, propertyUNASSIGNED}, // - {0xFB38, 0xFB3C, propertyDISALLOWED}, // HEBREW LETTER TET WITH DAGESH..HEBREW LETTER - {0xFB3D, 0x0, propertyUNASSIGNED}, // - {0xFB3E, 0x0, propertyDISALLOWED}, // HEBREW LETTER MEM WITH DAGESH - {0xFB3F, 0x0, propertyUNASSIGNED}, // - {0xFB40, 0xFB41, propertyDISALLOWED}, // HEBREW LETTER NUN WITH DAGESH..HEBREW LETTER - {0xFB42, 0x0, propertyUNASSIGNED}, // - {0xFB43, 0xFB44, propertyDISALLOWED}, // HEBREW LETTER FINAL PE WITH DAGESH..HEBREW L - {0xFB45, 0x0, propertyUNASSIGNED}, // - {0xFB46, 0xFBB1, propertyDISALLOWED}, // HEBREW LETTER TSADI WITH DAGESH..ARABIC LETT - {0xFBB2, 0xFBD2, propertyUNASSIGNED}, // .. - {0xFBD3, 0xFD3F, propertyDISALLOWED}, // ARABIC LETTER NG ISOLATED FORM..ORNATE RIGHT - {0xFD40, 0xFD4F, propertyUNASSIGNED}, // .. 
- {0xFD50, 0xFD8F, propertyDISALLOWED}, // ARABIC LIGATURE TEH WITH JEEM WITH MEEM INIT - {0xFD90, 0xFD91, propertyUNASSIGNED}, // .. - {0xFD92, 0xFDC7, propertyDISALLOWED}, // ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INI - {0xFDC8, 0xFDCF, propertyUNASSIGNED}, // .. - {0xFDD0, 0xFDFD, propertyDISALLOWED}, // ..ARABIC LIGATURE BISMILLAH AR - {0xFDFE, 0xFDFF, propertyUNASSIGNED}, // .. - {0xFE00, 0xFE19, propertyDISALLOWED}, // VARIATION SELECTOR-1..PRESENTATION FORM FOR - {0xFE1A, 0xFE1F, propertyUNASSIGNED}, // .. - {0xFE20, 0xFE26, propertyPVALID}, // COMBINING LIGATURE LEFT HALF..COMBINING CONJ - {0xFE27, 0xFE2F, propertyUNASSIGNED}, // .. - {0xFE30, 0xFE52, propertyDISALLOWED}, // PRESENTATION FORM FOR VERTICAL TWO DOT LEADE - {0xFE53, 0x0, propertyUNASSIGNED}, // - {0xFE54, 0xFE66, propertyDISALLOWED}, // SMALL SEMICOLON..SMALL EQUALS SIGN - {0xFE67, 0x0, propertyUNASSIGNED}, // - {0xFE68, 0xFE6B, propertyDISALLOWED}, // SMALL REVERSE SOLIDUS..SMALL COMMERCIAL AT - {0xFE6C, 0xFE6F, propertyUNASSIGNED}, // .. - {0xFE70, 0xFE72, propertyDISALLOWED}, // ARABIC FATHATAN ISOLATED FORM..ARABIC DAMMAT - {0xFE73, 0x0, propertyPVALID}, // ARABIC TAIL FRAGMENT - {0xFE74, 0x0, propertyDISALLOWED}, // ARABIC KASRATAN ISOLATED FORM - {0xFE75, 0x0, propertyUNASSIGNED}, // - {0xFE76, 0xFEFC, propertyDISALLOWED}, // ARABIC FATHA ISOLATED FORM..ARABIC LIGATURE - {0xFEFD, 0xFEFE, propertyUNASSIGNED}, // .. - {0xFEFF, 0x0, propertyDISALLOWED}, // ZERO WIDTH NO-BREAK SPACE - {0xFF00, 0x0, propertyUNASSIGNED}, // - {0xFF01, 0xFFBE, propertyDISALLOWED}, // FULLWIDTH EXCLAMATION MARK..HALFWIDTH HANGUL - {0xFFBF, 0xFFC1, propertyUNASSIGNED}, // .. - {0xFFC2, 0xFFC7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER A..HALFWIDTH HANGUL - {0xFFC8, 0xFFC9, propertyUNASSIGNED}, // .. - {0xFFCA, 0xFFCF, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YEO..HALFWIDTH HANGU - {0xFFD0, 0xFFD1, propertyUNASSIGNED}, // .. - {0xFFD2, 0xFFD7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YO..HALFWIDTH HANGUL - {0xFFD8, 0xFFD9, propertyUNASSIGNED}, // .. - {0xFFDA, 0xFFDC, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER EU..HALFWIDTH HANGUL - {0xFFDD, 0xFFDF, propertyUNASSIGNED}, // .. - {0xFFE0, 0xFFE6, propertyDISALLOWED}, // FULLWIDTH CENT SIGN..FULLWIDTH WON SIGN - {0xFFE7, 0x0, propertyUNASSIGNED}, // - {0xFFE8, 0xFFEE, propertyDISALLOWED}, // HALFWIDTH FORMS LIGHT VERTICAL..HALFWIDTH WH - {0xFFEF, 0xFFF8, propertyUNASSIGNED}, // .. - {0xFFF9, 0xFFFF, propertyDISALLOWED}, // INTERLINEAR ANNOTATION ANCHOR.. - {0x1000D, 0x10026, propertyPVALID}, // LINEAR B SYLLABLE B036 JO..LINEAR B SYLLABLE - {0x10027, 0x0, propertyUNASSIGNED}, // - {0x10028, 0x1003A, propertyPVALID}, // LINEAR B SYLLABLE B060 RA..LINEAR B SYLLABLE - {0x1003B, 0x0, propertyUNASSIGNED}, // - {0x1003C, 0x1003D, propertyPVALID}, // LINEAR B SYLLABLE B017 ZA..LINEAR B SYLLABLE - {0x1003E, 0x0, propertyUNASSIGNED}, // - {0x1003F, 0x1004D, propertyPVALID}, // LINEAR B SYLLABLE B020 ZO..LINEAR B SYLLABLE - {0x1004E, 0x1004F, propertyUNASSIGNED}, // .. - {0x10050, 0x1005D, propertyPVALID}, // LINEAR B SYMBOL B018..LINEAR B SYMBOL B089 - {0x1005E, 0x1007F, propertyUNASSIGNED}, // .. - {0x10080, 0x100FA, propertyPVALID}, // LINEAR B IDEOGRAM B100 MAN..LINEAR B IDEOGRA - {0x100FB, 0x100FF, propertyUNASSIGNED}, // .. - {0x10100, 0x10102, propertyDISALLOWED}, // AEGEAN WORD SEPARATOR LINE..AEGEAN CHECK MAR - {0x10103, 0x10106, propertyUNASSIGNED}, // .. 
- {0x10107, 0x10133, propertyDISALLOWED}, // AEGEAN NUMBER ONE..AEGEAN NUMBER NINETY THOU - {0x10134, 0x10136, propertyUNASSIGNED}, // .. - {0x10137, 0x1018A, propertyDISALLOWED}, // AEGEAN WEIGHT BASE UNIT..GREEK ZERO SIGN - {0x1018B, 0x1018F, propertyUNASSIGNED}, // .. - {0x10190, 0x1019B, propertyDISALLOWED}, // ROMAN SEXTANS SIGN..ROMAN CENTURIAL SIGN - {0x1019C, 0x101CF, propertyUNASSIGNED}, // .. - {0x101D0, 0x101FC, propertyDISALLOWED}, // PHAISTOS DISC SIGN PEDESTRIAN..PHAISTOS DISC - {0x101FD, 0x0, propertyPVALID}, // PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE - {0x101FE, 0x1027F, propertyUNASSIGNED}, // .. - {0x10280, 0x1029C, propertyPVALID}, // LYCIAN LETTER A..LYCIAN LETTER X - {0x1029D, 0x1029F, propertyUNASSIGNED}, // .. - {0x102A0, 0x102D0, propertyPVALID}, // CARIAN LETTER A..CARIAN LETTER UUU3 - {0x102D1, 0x102FF, propertyUNASSIGNED}, // .. - {0x10300, 0x1031E, propertyPVALID}, // OLD ITALIC LETTER A..OLD ITALIC LETTER UU - {0x1031F, 0x0, propertyUNASSIGNED}, // - {0x10320, 0x10323, propertyDISALLOWED}, // OLD ITALIC NUMERAL ONE..OLD ITALIC NUMERAL F - {0x10324, 0x1032F, propertyUNASSIGNED}, // .. - {0x10330, 0x10340, propertyPVALID}, // GOTHIC LETTER AHSA..GOTHIC LETTER PAIRTHRA - {0x10341, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINETY - {0x10342, 0x10349, propertyPVALID}, // GOTHIC LETTER RAIDA..GOTHIC LETTER OTHAL - {0x1034A, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINE HUNDRED - {0x1034B, 0x1037F, propertyUNASSIGNED}, // .. - {0x10380, 0x1039D, propertyPVALID}, // UGARITIC LETTER ALPA..UGARITIC LETTER SSU - {0x1039E, 0x0, propertyUNASSIGNED}, // - {0x1039F, 0x0, propertyDISALLOWED}, // UGARITIC WORD DIVIDER - {0x103A0, 0x103C3, propertyPVALID}, // OLD PERSIAN SIGN A..OLD PERSIAN SIGN HA - {0x103C4, 0x103C7, propertyUNASSIGNED}, // .. - {0x103C8, 0x103CF, propertyPVALID}, // OLD PERSIAN SIGN AURAMAZDAA..OLD PERSIAN SIG - {0x103D0, 0x103D5, propertyDISALLOWED}, // OLD PERSIAN WORD DIVIDER..OLD PERSIAN NUMBER - {0x103D6, 0x103FF, propertyUNASSIGNED}, // .. - {0x10400, 0x10427, propertyDISALLOWED}, // DESERET CAPITAL LETTER LONG I..DESERET CAPIT - {0x10428, 0x1049D, propertyPVALID}, // DESERET SMALL LETTER LONG I..OSMANYA LETTER - {0x1049E, 0x1049F, propertyUNASSIGNED}, // .. - {0x104A0, 0x104A9, propertyPVALID}, // OSMANYA DIGIT ZERO..OSMANYA DIGIT NINE - {0x104AA, 0x107FF, propertyUNASSIGNED}, // .. - {0x10800, 0x10805, propertyPVALID}, // CYPRIOT SYLLABLE A..CYPRIOT SYLLABLE JA - {0x10806, 0x10807, propertyUNASSIGNED}, // .. - {0x10808, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE JO - {0x10809, 0x0, propertyUNASSIGNED}, // - {0x1080A, 0x10835, propertyPVALID}, // CYPRIOT SYLLABLE KA..CYPRIOT SYLLABLE WO - {0x10836, 0x0, propertyUNASSIGNED}, // - {0x10837, 0x10838, propertyPVALID}, // CYPRIOT SYLLABLE XA..CYPRIOT SYLLABLE XE - {0x10839, 0x1083B, propertyUNASSIGNED}, // .. - {0x1083C, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE ZA - {0x1083D, 0x1083E, propertyUNASSIGNED}, // .. - {0x1083F, 0x10855, propertyPVALID}, // CYPRIOT SYLLABLE ZO..IMPERIAL ARAMAIC LETTER - {0x10856, 0x0, propertyUNASSIGNED}, // - {0x10857, 0x1085F, propertyDISALLOWED}, // IMPERIAL ARAMAIC SECTION SIGN..IMPERIAL ARAM - {0x10860, 0x108FF, propertyUNASSIGNED}, // .. - {0x10900, 0x10915, propertyPVALID}, // PHOENICIAN LETTER ALF..PHOENICIAN LETTER TAU - {0x10916, 0x1091B, propertyDISALLOWED}, // PHOENICIAN NUMBER ONE..PHOENICIAN NUMBER THR - {0x1091C, 0x1091E, propertyUNASSIGNED}, // .. 
- {0x1091F, 0x0, propertyDISALLOWED}, // PHOENICIAN WORD SEPARATOR - {0x10920, 0x10939, propertyPVALID}, // LYDIAN LETTER A..LYDIAN LETTER C - {0x1093A, 0x1093E, propertyUNASSIGNED}, // .. - {0x1093F, 0x0, propertyDISALLOWED}, // LYDIAN TRIANGULAR MARK - {0x10940, 0x109FF, propertyUNASSIGNED}, // .. - {0x10A00, 0x10A03, propertyPVALID}, // KHAROSHTHI LETTER A..KHAROSHTHI VOWEL SIGN V - {0x10A04, 0x0, propertyUNASSIGNED}, // - {0x10A05, 0x10A06, propertyPVALID}, // KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SI - {0x10A07, 0x10A0B, propertyUNASSIGNED}, // .. - {0x10A0C, 0x10A13, propertyPVALID}, // KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI LET - {0x10A14, 0x0, propertyUNASSIGNED}, // - {0x10A15, 0x10A17, propertyPVALID}, // KHAROSHTHI LETTER CA..KHAROSHTHI LETTER JA - {0x10A18, 0x0, propertyUNASSIGNED}, // - {0x10A19, 0x10A33, propertyPVALID}, // KHAROSHTHI LETTER NYA..KHAROSHTHI LETTER TTT - {0x10A34, 0x10A37, propertyUNASSIGNED}, // .. - {0x10A38, 0x10A3A, propertyPVALID}, // KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN D - {0x10A3B, 0x10A3E, propertyUNASSIGNED}, // .. - {0x10A3F, 0x0, propertyPVALID}, // KHAROSHTHI VIRAMA - {0x10A40, 0x10A47, propertyDISALLOWED}, // KHAROSHTHI DIGIT ONE..KHAROSHTHI NUMBER ONE - {0x10A48, 0x10A4F, propertyUNASSIGNED}, // .. - {0x10A50, 0x10A58, propertyDISALLOWED}, // KHAROSHTHI PUNCTUATION DOT..KHAROSHTHI PUNCT - {0x10A59, 0x10A5F, propertyUNASSIGNED}, // .. - {0x10A60, 0x10A7C, propertyPVALID}, // OLD SOUTH ARABIAN LETTER HE..OLD SOUTH ARABI - {0x10A7D, 0x10A7F, propertyDISALLOWED}, // OLD SOUTH ARABIAN NUMBER ONE..OLD SOUTH ARAB - {0x10A80, 0x10AFF, propertyUNASSIGNED}, // .. - {0x10B00, 0x10B35, propertyPVALID}, // AVESTAN LETTER A..AVESTAN LETTER HE - {0x10B36, 0x10B38, propertyUNASSIGNED}, // .. - {0x10B39, 0x10B3F, propertyDISALLOWED}, // AVESTAN ABBREVIATION MARK..LARGE ONE RING OV - {0x10B40, 0x10B55, propertyPVALID}, // INSCRIPTIONAL PARTHIAN LETTER ALEPH..INSCRIP - {0x10B56, 0x10B57, propertyUNASSIGNED}, // .. - {0x10B58, 0x10B5F, propertyDISALLOWED}, // INSCRIPTIONAL PARTHIAN NUMBER ONE..INSCRIPTI - {0x10B60, 0x10B72, propertyPVALID}, // INSCRIPTIONAL PAHLAVI LETTER ALEPH..INSCRIPT - {0x10B73, 0x10B77, propertyUNASSIGNED}, // .. - {0x10B78, 0x10B7F, propertyDISALLOWED}, // INSCRIPTIONAL PAHLAVI NUMBER ONE..INSCRIPTIO - {0x10B80, 0x10BFF, propertyUNASSIGNED}, // .. - {0x10C00, 0x10C48, propertyPVALID}, // OLD TURKIC LETTER ORKHON A..OLD TURKIC LETTE - {0x10C49, 0x10E5F, propertyUNASSIGNED}, // .. - {0x10E60, 0x10E7E, propertyDISALLOWED}, // RUMI DIGIT ONE..RUMI FRACTION TWO THIRDS - {0x10E7F, 0x1107F, propertyUNASSIGNED}, // .. - {0x11080, 0x110BA, propertyPVALID}, // KAITHI SIGN CANDRABINDU..KAITHI SIGN NUKTA - {0x110BB, 0x110C1, propertyDISALLOWED}, // KAITHI ABBREVIATION SIGN..KAITHI DOUBLE DAND - {0x110C2, 0x11FFF, propertyUNASSIGNED}, // .. - {0x12000, 0x1236E, propertyPVALID}, // CUNEIFORM SIGN A..CUNEIFORM SIGN ZUM - {0x1236F, 0x123FF, propertyUNASSIGNED}, // .. - {0x12400, 0x12462, propertyDISALLOWED}, // CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NU - {0x12463, 0x1246F, propertyUNASSIGNED}, // .. - {0x12470, 0x12473, propertyDISALLOWED}, // CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD - {0x12474, 0x12FFF, propertyUNASSIGNED}, // .. - {0x13000, 0x1342E, propertyPVALID}, // EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYP - {0x1342F, 0x1CFFF, propertyUNASSIGNED}, // .. - {0x1D000, 0x1D0F5, propertyDISALLOWED}, // BYZANTINE MUSICAL SYMBOL PSILI..BYZANTINE MU - {0x1D0F6, 0x1D0FF, propertyUNASSIGNED}, // .. 
- {0x1D100, 0x1D126, propertyDISALLOWED}, // MUSICAL SYMBOL SINGLE BARLINE..MUSICAL SYMBO - {0x1D127, 0x1D128, propertyUNASSIGNED}, // .. - {0x1D129, 0x1D1DD, propertyDISALLOWED}, // MUSICAL SYMBOL MULTIPLE MEASURE REST..MUSICA - {0x1D1DE, 0x1D1FF, propertyUNASSIGNED}, // .. - {0x1D200, 0x1D245, propertyDISALLOWED}, // GREEK VOCAL NOTATION SYMBOL-1..GREEK MUSICAL - {0x1D246, 0x1D2FF, propertyUNASSIGNED}, // .. - {0x1D300, 0x1D356, propertyDISALLOWED}, // MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING - {0x1D357, 0x1D35F, propertyUNASSIGNED}, // .. - {0x1D360, 0x1D371, propertyDISALLOWED}, // COUNTING ROD UNIT DIGIT ONE..COUNTING ROD TE - {0x1D372, 0x1D3FF, propertyUNASSIGNED}, // .. - {0x1D400, 0x1D454, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL A..MATHEMATICAL IT - {0x1D455, 0x0, propertyUNASSIGNED}, // - {0x1D456, 0x1D49C, propertyDISALLOWED}, // MATHEMATICAL ITALIC SMALL I..MATHEMATICAL SC - {0x1D49D, 0x0, propertyUNASSIGNED}, // - {0x1D49E, 0x1D49F, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL C..MATHEMATICAL - {0x1D4A0, 0x1D4A1, propertyUNASSIGNED}, // .. - {0x1D4A2, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL G - {0x1D4A3, 0x1D4A4, propertyUNASSIGNED}, // .. - {0x1D4A5, 0x1D4A6, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL J..MATHEMATICAL - {0x1D4A7, 0x1D4A8, propertyUNASSIGNED}, // .. - {0x1D4A9, 0x1D4AC, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL N..MATHEMATICAL - {0x1D4AD, 0x0, propertyUNASSIGNED}, // - {0x1D4AE, 0x1D4B9, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL S..MATHEMATICAL - {0x1D4BA, 0x0, propertyUNASSIGNED}, // - {0x1D4BB, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL F - {0x1D4BC, 0x0, propertyUNASSIGNED}, // - {0x1D4BD, 0x1D4C3, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL H..MATHEMATICAL SC - {0x1D4C4, 0x0, propertyUNASSIGNED}, // - {0x1D4C5, 0x1D505, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL P..MATHEMATICAL FR - {0x1D506, 0x0, propertyUNASSIGNED}, // - {0x1D507, 0x1D50A, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL D..MATHEMATICAL - {0x1D50B, 0x1D50C, propertyUNASSIGNED}, // .. - {0x1D50D, 0x1D514, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL J..MATHEMATICAL - {0x1D515, 0x0, propertyUNASSIGNED}, // - {0x1D516, 0x1D51C, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL S..MATHEMATICAL - {0x1D51D, 0x0, propertyUNASSIGNED}, // - {0x1D51E, 0x1D539, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR SMALL A..MATHEMATICAL D - {0x1D53A, 0x0, propertyUNASSIGNED}, // - {0x1D53B, 0x1D53E, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL D..MATHEM - {0x1D53F, 0x0, propertyUNASSIGNED}, // - {0x1D540, 0x1D544, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL I..MATHEM - {0x1D545, 0x0, propertyUNASSIGNED}, // - {0x1D546, 0x0, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL O - {0x1D547, 0x1D549, propertyUNASSIGNED}, // .. - {0x1D54A, 0x1D550, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL S..MATHEM - {0x1D551, 0x0, propertyUNASSIGNED}, // - {0x1D552, 0x1D6A5, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK SMALL A..MATHEMAT - {0x1D6A6, 0x1D6A7, propertyUNASSIGNED}, // .. - {0x1D6A8, 0x1D7CB, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL ALPHA..MATHEMATICA - {0x1D7CC, 0x1D7CD, propertyUNASSIGNED}, // .. - {0x1D7CE, 0x1D7FF, propertyDISALLOWED}, // MATHEMATICAL BOLD DIGIT ZERO..MATHEMATICAL M - {0x1D800, 0x1EFFF, propertyUNASSIGNED}, // .. 
- {0x1F000, 0x1F02B, propertyDISALLOWED}, // MAHJONG TILE EAST WIND..MAHJONG TILE BACK - {0x1F02C, 0x1F02F, propertyUNASSIGNED}, // .. - {0x1F030, 0x1F093, propertyDISALLOWED}, // DOMINO TILE HORIZONTAL BACK..DOMINO TILE VER - {0x1F094, 0x1F0FF, propertyUNASSIGNED}, // .. - {0x1F100, 0x1F10A, propertyDISALLOWED}, // DIGIT ZERO FULL STOP..DIGIT NINE COMMA - {0x1F10B, 0x1F10F, propertyUNASSIGNED}, // .. - {0x1F110, 0x1F12E, propertyDISALLOWED}, // PARENTHESIZED LATIN CAPITAL LETTER A..CIRCLE - {0x1F12F, 0x1F130, propertyUNASSIGNED}, // .. - {0x1F131, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER B - {0x1F132, 0x1F13C, propertyUNASSIGNED}, // .. - {0x1F13D, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER N - {0x1F13E, 0x0, propertyUNASSIGNED}, // - {0x1F13F, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER P - {0x1F140, 0x1F141, propertyUNASSIGNED}, // .. - {0x1F142, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER S - {0x1F143, 0x1F145, propertyUNASSIGNED}, // .. - {0x1F146, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER W - {0x1F147, 0x1F149, propertyUNASSIGNED}, // .. - {0x1F14A, 0x1F14E, propertyDISALLOWED}, // SQUARED HV..SQUARED PPV - {0x1F14F, 0x1F156, propertyUNASSIGNED}, // .. - {0x1F157, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER H - {0x1F158, 0x1F15E, propertyUNASSIGNED}, // .. - {0x1F15F, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER P - {0x1F160, 0x1F178, propertyUNASSIGNED}, // .. - {0x1F179, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER J - {0x1F17A, 0x0, propertyUNASSIGNED}, // - {0x1F17B, 0x1F17C, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER L..NEG - {0x1F17D, 0x1F17E, propertyUNASSIGNED}, // .. - {0x1F17F, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER P - {0x1F180, 0x1F189, propertyUNASSIGNED}, // .. - {0x1F18A, 0x1F18D, propertyDISALLOWED}, // CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTE - {0x1F18E, 0x1F18F, propertyUNASSIGNED}, // .. - {0x1F190, 0x0, propertyDISALLOWED}, // SQUARE DJ - {0x1F191, 0x1F1FF, propertyUNASSIGNED}, // .. - {0x1F200, 0x0, propertyDISALLOWED}, // SQUARE HIRAGANA HOKA - {0x1F201, 0x1F20F, propertyUNASSIGNED}, // .. - {0x1F210, 0x1F231, propertyDISALLOWED}, // SQUARED CJK UNIFIED IDEOGRAPH-624B..SQUARED - {0x1F232, 0x1F23F, propertyUNASSIGNED}, // .. - {0x1F240, 0x1F248, propertyDISALLOWED}, // TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRA - {0x1F249, 0x1FFFD, propertyUNASSIGNED}, // .. - {0x1FFFE, 0x1FFFF, propertyDISALLOWED}, // .. - {0x20000, 0x2A6D6, propertyPVALID}, // .... - {0x2A700, 0x2B734, propertyPVALID}, // .... - {0x2F800, 0x2FA1D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPA - {0x2FA1E, 0x2FFFD, propertyUNASSIGNED}, // .. - {0x2FFFE, 0x2FFFF, propertyDISALLOWED}, // .. - {0x30000, 0x3FFFD, propertyUNASSIGNED}, // .. - {0x3FFFE, 0x3FFFF, propertyDISALLOWED}, // .. - {0x40000, 0x4FFFD, propertyUNASSIGNED}, // .. - {0x4FFFE, 0x4FFFF, propertyDISALLOWED}, // .. - {0x50000, 0x5FFFD, propertyUNASSIGNED}, // .. - {0x5FFFE, 0x5FFFF, propertyDISALLOWED}, // .. - {0x60000, 0x6FFFD, propertyUNASSIGNED}, // .. - {0x6FFFE, 0x6FFFF, propertyDISALLOWED}, // .. - {0x70000, 0x7FFFD, propertyUNASSIGNED}, // .. - {0x7FFFE, 0x7FFFF, propertyDISALLOWED}, // .. - {0x80000, 0x8FFFD, propertyUNASSIGNED}, // .. - {0x8FFFE, 0x8FFFF, propertyDISALLOWED}, // .. - {0x90000, 0x9FFFD, propertyUNASSIGNED}, // .. - {0x9FFFE, 0x9FFFF, propertyDISALLOWED}, // .. 
- {0xA0000, 0xAFFFD, propertyUNASSIGNED}, // .. - {0xAFFFE, 0xAFFFF, propertyDISALLOWED}, // .. - {0xB0000, 0xBFFFD, propertyUNASSIGNED}, // .. - {0xBFFFE, 0xBFFFF, propertyDISALLOWED}, // .. - {0xC0000, 0xCFFFD, propertyUNASSIGNED}, // .. - {0xCFFFE, 0xCFFFF, propertyDISALLOWED}, // .. - {0xD0000, 0xDFFFD, propertyUNASSIGNED}, // .. - {0xDFFFE, 0xDFFFF, propertyDISALLOWED}, // .. - {0xE0000, 0x0, propertyUNASSIGNED}, // - {0xE0001, 0x0, propertyDISALLOWED}, // LANGUAGE TAG - {0xE0002, 0xE001F, propertyUNASSIGNED}, // .. - {0xE0020, 0xE007F, propertyDISALLOWED}, // TAG SPACE..CANCEL TAG - {0xE0080, 0xE00FF, propertyUNASSIGNED}, // .. - {0xE0100, 0xE01EF, propertyDISALLOWED}, // VARIATION SELECTOR-17..VARIATION SELECTOR-25 - {0xE01F0, 0xEFFFD, propertyUNASSIGNED}, // .. - {0xEFFFE, 0x10FFFF, propertyDISALLOWED}, // .. -} diff --git a/vendor/github.com/miekg/dns/idn/example_test.go b/vendor/github.com/miekg/dns/idn/example_test.go deleted file mode 100644 index 8833cd91..00000000 --- a/vendor/github.com/miekg/dns/idn/example_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package idn_test - -import ( - "fmt" - "github.com/miekg/dns/idn" -) - -func ExampleToPunycode() { - name := "インターãƒãƒƒãƒˆ.テスト" - fmt.Printf("%s -> %s", name, idn.ToPunycode(name)) - // Output: インターãƒãƒƒãƒˆ.テスト -> xn--eckucmux0ukc.xn--zckzah -} - -func ExampleFromPunycode() { - name := "xn--mgbaja8a1hpac.xn--mgbachtv" - fmt.Printf("%s -> %s", name, idn.FromPunycode(name)) - // Output: xn--mgbaja8a1hpac.xn--mgbachtv -> الانترنت.اختبار -} diff --git a/vendor/github.com/miekg/dns/idn/punycode.go b/vendor/github.com/miekg/dns/idn/punycode.go deleted file mode 100644 index 1d03bf6a..00000000 --- a/vendor/github.com/miekg/dns/idn/punycode.go +++ /dev/null @@ -1,370 +0,0 @@ -// Package idn implements encoding from and to punycode as speficied by RFC 3492. -package idn - -import ( - "bytes" - "strings" - "unicode" - "unicode/utf8" - - "github.com/miekg/dns" -) - -// Implementation idea from RFC itself and from from IDNA::Punycode created by -// Tatsuhiko Miyagawa and released under Perl Artistic -// License in 2002. - -const ( - _MIN rune = 1 - _MAX rune = 26 - _SKEW rune = 38 - _BASE rune = 36 - _BIAS rune = 72 - _N rune = 128 - _DAMP rune = 700 - - _DELIMITER = '-' - _PREFIX = "xn--" -) - -// ToPunycode converts unicode domain names to DNS-appropriate punycode names. -// This function will return an empty string result for domain names with -// invalid unicode strings. This function expects domain names in lowercase. -func ToPunycode(s string) string { - // Early check to see if encoding is needed. - // This will prevent making heap allocations when not needed. - if !needToPunycode(s) { - return s - } - - tokens := dns.SplitDomainName(s) - switch { - case s == "": - return "" - case tokens == nil: // s == . - return "." - case s[len(s)-1] == '.': - tokens = append(tokens, "") - } - - for i := range tokens { - t := encode([]byte(tokens[i])) - if t == nil { - return "" - } - tokens[i] = string(t) - } - return strings.Join(tokens, ".") -} - -// FromPunycode returns unicode domain name from provided punycode string. -// This function expects punycode strings in lowercase. -func FromPunycode(s string) string { - // Early check to see if decoding is needed. - // This will prevent making heap allocations when not needed. - if !needFromPunycode(s) { - return s - } - - tokens := dns.SplitDomainName(s) - switch { - case s == "": - return "" - case tokens == nil: // s == . - return "." 
- case s[len(s)-1] == '.': - tokens = append(tokens, "") - } - for i := range tokens { - tokens[i] = string(decode([]byte(tokens[i]))) - } - return strings.Join(tokens, ".") -} - -// digitval converts single byte into meaningful value that's used to calculate decoded unicode character. -const errdigit = 0xffff - -func digitval(code rune) rune { - switch { - case code >= 'A' && code <= 'Z': - return code - 'A' - case code >= 'a' && code <= 'z': - return code - 'a' - case code >= '0' && code <= '9': - return code - '0' + 26 - } - return errdigit -} - -// lettercode finds BASE36 byte (a-z0-9) based on calculated number. -func lettercode(digit rune) rune { - switch { - case digit >= 0 && digit <= 25: - return digit + 'a' - case digit >= 26 && digit <= 36: - return digit - 26 + '0' - } - panic("dns: not reached") -} - -// adapt calculates next bias to be used for next iteration delta. -func adapt(delta rune, numpoints int, firsttime bool) rune { - if firsttime { - delta /= _DAMP - } else { - delta /= 2 - } - - var k rune - for delta = delta + delta/rune(numpoints); delta > (_BASE-_MIN)*_MAX/2; k += _BASE { - delta /= _BASE - _MIN - } - - return k + ((_BASE-_MIN+1)*delta)/(delta+_SKEW) -} - -// next finds minimal rune (one with lowest codepoint value) that should be equal or above boundary. -func next(b []rune, boundary rune) rune { - if len(b) == 0 { - panic("dns: invalid set of runes to determine next one") - } - m := b[0] - for _, x := range b[1:] { - if x >= boundary && (m < boundary || x < m) { - m = x - } - } - return m -} - -// preprune converts unicode rune to lower case. At this time it's not -// supporting all things described in RFCs. -func preprune(r rune) rune { - if unicode.IsUpper(r) { - r = unicode.ToLower(r) - } - return r -} - -// tfunc is a function that helps calculate each character weight. -func tfunc(k, bias rune) rune { - switch { - case k <= bias: - return _MIN - case k >= bias+_MAX: - return _MAX - } - return k - bias -} - -// needToPunycode returns true for strings that require punycode encoding -// (contain unicode characters). -func needToPunycode(s string) bool { - // This function is very similar to bytes.Runes. We don't use bytes.Runes - // because it makes a heap allocation that's not needed here. - for i := 0; len(s) > 0; i++ { - r, l := utf8.DecodeRuneInString(s) - if r > 0x7f { - return true - } - s = s[l:] - } - return false -} - -// needFromPunycode returns true for strings that require punycode decoding. -func needFromPunycode(s string) bool { - if s == "." { - return false - } - - off := 0 - end := false - pl := len(_PREFIX) - sl := len(s) - - // If s starts with _PREFIX. - if sl > pl && s[off:off+pl] == _PREFIX { - return true - } - - for { - // Find the part after the next ".". - off, end = dns.NextLabel(s, off) - if end { - return false - } - // If this parts starts with _PREFIX. - if sl-off > pl && s[off:off+pl] == _PREFIX { - return true - } - } -} - -// encode transforms Unicode input bytes (that represent DNS label) into -// punycode bytestream. This function would return nil if there's an invalid -// character in the label. 
-func encode(input []byte) []byte { - n, bias := _N, _BIAS - - b := bytes.Runes(input) - for i := range b { - if !isValidRune(b[i]) { - return nil - } - - b[i] = preprune(b[i]) - } - - basic := make([]byte, 0, len(b)) - for _, ltr := range b { - if ltr <= 0x7f { - basic = append(basic, byte(ltr)) - } - } - basiclen := len(basic) - fulllen := len(b) - if basiclen == fulllen { - return basic - } - - var out bytes.Buffer - - out.WriteString(_PREFIX) - if basiclen > 0 { - out.Write(basic) - out.WriteByte(_DELIMITER) - } - - var ( - ltr, nextltr rune - delta, q rune // delta calculation (see rfc) - t, k, cp rune // weight and codepoint calculation - ) - - for h := basiclen; h < fulllen; n, delta = n+1, delta+1 { - nextltr = next(b, n) - delta, n = delta+(nextltr-n)*rune(h+1), nextltr - - for _, ltr = range b { - if ltr < n { - delta++ - } - if ltr == n { - q = delta - for k = _BASE; ; k += _BASE { - t = tfunc(k, bias) - if q < t { - break - } - cp = t + ((q - t) % (_BASE - t)) - out.WriteRune(lettercode(cp)) - q = (q - t) / (_BASE - t) - } - - out.WriteRune(lettercode(q)) - - bias = adapt(delta, h+1, h == basiclen) - h, delta = h+1, 0 - } - } - } - return out.Bytes() -} - -// decode transforms punycode input bytes (that represent DNS label) into Unicode bytestream. -func decode(b []byte) []byte { - src := b // b would move and we need to keep it - - n, bias := _N, _BIAS - if !bytes.HasPrefix(b, []byte(_PREFIX)) { - return b - } - out := make([]rune, 0, len(b)) - b = b[len(_PREFIX):] - for pos := len(b) - 1; pos >= 0; pos-- { - // only last delimiter is our interest - if b[pos] == _DELIMITER { - out = append(out, bytes.Runes(b[:pos])...) - b = b[pos+1:] // trim source string - break - } - } - if len(b) == 0 { - return src - } - var ( - i, oldi, w rune - ch byte - t, digit rune - ln int - ) - - for i = 0; len(b) > 0; i++ { - oldi, w = i, 1 - for k := _BASE; len(b) > 0; k += _BASE { - ch, b = b[0], b[1:] - digit = digitval(rune(ch)) - if digit == errdigit { - return src - } - i += digit * w - if i < 0 { - // safety check for rune overflow - return src - } - - t = tfunc(k, bias) - if digit < t { - break - } - - w *= _BASE - t - } - ln = len(out) + 1 - bias = adapt(i-oldi, ln, oldi == 0) - n += i / rune(ln) - i = i % rune(ln) - // insert - out = append(out, 0) - copy(out[i+1:], out[i:]) - out[i] = n - } - - var ret bytes.Buffer - for _, r := range out { - ret.WriteRune(r) - } - return ret.Bytes() -} - -// isValidRune checks if the character is valid. We will look for the -// character property in the code points list. For now we aren't checking special -// rules in case of contextual property -func isValidRune(r rune) bool { - return findProperty(r) == propertyPVALID -} - -// findProperty will try to check the code point property of the given -// character. 
It will use a binary search algorithm as we have a slice of -// ordered ranges (average case performance O(log n)) -func findProperty(r rune) property { - imin, imax := 0, len(codePoints) - - for imax >= imin { - imid := (imin + imax) / 2 - - codePoint := codePoints[imid] - if (codePoint.start == r && codePoint.end == 0) || (codePoint.start <= r && codePoint.end >= r) { - return codePoint.state - } - - if (codePoint.end > 0 && codePoint.end < r) || (codePoint.end == 0 && codePoint.start < r) { - imin = imid + 1 - } else { - imax = imid - 1 - } - } - - return propertyUnknown -} diff --git a/vendor/github.com/miekg/dns/idn/punycode_test.go b/vendor/github.com/miekg/dns/idn/punycode_test.go deleted file mode 100644 index 9c9a15f0..00000000 --- a/vendor/github.com/miekg/dns/idn/punycode_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package idn - -import ( - "strings" - "testing" -) - -var testcases = [][2]string{ - {"", ""}, - {"a", "a"}, - {"a-b", "a-b"}, - {"a-b-c", "a-b-c"}, - {"abc", "abc"}, - {"Ñ", "xn--41a"}, - {"zÑ", "xn--z-0ub"}, - {"ÑZ", "xn--z-zub"}, - {"а-Ñ", "xn----7sb8g"}, - {"إختبار", "xn--kgbechtv"}, - {"آزمایشی", "xn--hgbk6aj7f53bba"}, - {"测试", "xn--0zwm56d"}, - {"測試", "xn--g6w251d"}, - {"иÑпытание", "xn--80akhbyknj4f"}, - {"परीकà¥à¤·à¤¾", "xn--11b5bs3a9aj6g"}, - {"δοκιμή", "xn--jxalpdlp"}, - {"테스트", "xn--9t4b11yi5a"}, - {"טעסט", "xn--deba0ad"}, - {"テスト", "xn--zckzah"}, - {"பரிடà¯à®šà¯ˆ", "xn--hlcj6aya9esc7a"}, - {"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"}, - {"σ", "xn--4xa"}, -} - -func TestEncodeDecodePunycode(t *testing.T) { - for _, tst := range testcases { - enc := encode([]byte(tst[0])) - if string(enc) != tst[1] { - t.Errorf("%s encodeded as %s but should be %s", tst[0], enc, tst[1]) - } - dec := decode([]byte(tst[1])) - if string(dec) != strings.ToLower(tst[0]) { - t.Errorf("%s decoded as %s but should be %s", tst[1], dec, strings.ToLower(tst[0])) - } - } -} - -func TestToFromPunycode(t *testing.T) { - for _, tst := range testcases { - // assert unicode.com == punycode.com - full := ToPunycode(tst[0] + ".com") - if full != tst[1]+".com" { - t.Errorf("invalid result from string conversion to punycode, %s and should be %s.com", full, tst[1]) - } - // assert punycode.punycode == unicode.unicode - decoded := FromPunycode(tst[1] + "." + tst[1]) - if decoded != strings.ToLower(tst[0]+"."+tst[0]) { - t.Errorf("invalid result from string conversion to punycode, %s and should be %s.%s", decoded, tst[0], tst[0]) - } - } -} - -func TestEncodeDecodeFinalPeriod(t *testing.T) { - for _, tst := range testcases { - // assert unicode.com. == punycode.com. - full := ToPunycode(tst[0] + ".") - if full != tst[1]+"." { - t.Errorf("invalid result from string conversion to punycode when period added at the end, %#v and should be %#v", full, tst[1]+".") - } - // assert punycode.com. == unicode.com. - decoded := FromPunycode(tst[1] + ".") - if decoded != strings.ToLower(tst[0]+".") { - t.Errorf("invalid result from string conversion to punycode when period added, %#v and should be %#v", decoded, tst[0]+".") - } - full = ToPunycode(tst[0]) - if full != tst[1] { - t.Errorf("invalid result from string conversion to punycode when no period added at the end, %#v and should be %#v", full, tst[1]+".") - } - // assert punycode.com. == unicode.com. 
- decoded = FromPunycode(tst[1]) - if decoded != strings.ToLower(tst[0]) { - t.Errorf("invalid result from string conversion to punycode when no period added, %#v and should be %#v", decoded, tst[0]+".") - } - } -} - -var invalidACEs = []string{ - "xn--*", - "xn--", - "xn---", - "xn--a000000000", -} - -func TestInvalidPunycode(t *testing.T) { - for _, d := range invalidACEs { - s := FromPunycode(d) - if s != d { - t.Errorf("Changed invalid name %s to %#v", d, s) - } - } -} - -// You can verify the labels that are valid or not comparing to the Verisign -// website: http://mct.verisign-grs.com/ -var invalidUnicodes = []string{ - "Σ", - "ЯZ", - "ИÑпытание", -} - -func TestInvalidUnicodes(t *testing.T) { - for _, d := range invalidUnicodes { - s := ToPunycode(d) - if s != "" { - t.Errorf("Changed invalid name %s to %#v", d, s) - } - } -} diff --git a/vendor/github.com/miekg/dns/issue_test.go b/vendor/github.com/miekg/dns/issue_test.go deleted file mode 100644 index 265ad56c..00000000 --- a/vendor/github.com/miekg/dns/issue_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package dns - -// Tests that solve that an specific issue. - -import ( - "strings" - "testing" -) - -func TestTCPRtt(t *testing.T) { - m := new(Msg) - m.RecursionDesired = true - m.SetQuestion("example.org.", TypeA) - - c := &Client{} - for _, proto := range []string{"udp", "tcp"} { - c.Net = proto - _, rtt, err := c.Exchange(m, "8.8.4.4:53") - if err != nil { - t.Fatal(err) - } - if rtt == 0 { - t.Fatalf("expecting non zero rtt %s, got zero", c.Net) - } - } -} - -func TestNSEC3MissingSalt(t *testing.T) { - rr, err := NewRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 aabbccdd K8UDEMVP1J2F7EG6JEBPS17VP3N8I58H") - if err != nil { - t.Fatalf("failed to parse example rr: %s", err) - } - m := new(Msg) - m.Answer = []RR{rr} - mb, err := m.Pack() - if err != nil { - t.Fatalf("expected to pack message. err: %s", err) - } - if err := m.Unpack(mb); err != nil { - t.Fatalf("expected to unpack message. missing salt? err: %s", err) - } - in := rr.(*NSEC3).Salt - out := m.Answer[0].(*NSEC3).Salt - if in != out { - t.Fatalf("expected salts to match. packed: `%s`. returned: `%s`", in, out) - } -} - -func TestNSEC3MixedNextDomain(t *testing.T) { - rr, err := NewRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 - k8udemvp1j2f7eg6jebps17vp3n8i58h") - if err != nil { - t.Fatalf("failed to parse example rr: %s", err) - } - m := new(Msg) - m.Answer = []RR{rr} - mb, err := m.Pack() - if err != nil { - t.Fatalf("expected to pack message. err: %s", err) - } - if err := m.Unpack(mb); err != nil { - t.Fatalf("expected to unpack message. err: %s", err) - } - in := strings.ToUpper(rr.(*NSEC3).NextDomain) - out := m.Answer[0].(*NSEC3).NextDomain - if in != out { - t.Fatalf("expected round trip to produce NextDomain `%s`, instead `%s`", in, out) - } -} diff --git a/vendor/github.com/miekg/dns/labels_test.go b/vendor/github.com/miekg/dns/labels_test.go deleted file mode 100644 index 9875d6cd..00000000 --- a/vendor/github.com/miekg/dns/labels_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package dns - -import "testing" - -func TestCompareDomainName(t *testing.T) { - s1 := "www.miek.nl." - s2 := "miek.nl." - s3 := "www.bla.nl." - s4 := "nl.www.bla." 
- s5 := "nl" - s6 := "miek.nl" - - if CompareDomainName(s1, s2) != 2 { - t.Errorf("%s with %s should be %d", s1, s2, 2) - } - if CompareDomainName(s1, s3) != 1 { - t.Errorf("%s with %s should be %d", s1, s3, 1) - } - if CompareDomainName(s3, s4) != 0 { - t.Errorf("%s with %s should be %d", s3, s4, 0) - } - // Non qualified tests - if CompareDomainName(s1, s5) != 1 { - t.Errorf("%s with %s should be %d", s1, s5, 1) - } - if CompareDomainName(s1, s6) != 2 { - t.Errorf("%s with %s should be %d", s1, s5, 2) - } - - if CompareDomainName(s1, ".") != 0 { - t.Errorf("%s with %s should be %d", s1, s5, 0) - } - if CompareDomainName(".", ".") != 0 { - t.Errorf("%s with %s should be %d", ".", ".", 0) - } - if CompareDomainName("test.com.", "TEST.COM.") != 2 { - t.Errorf("test.com. and TEST.COM. should be an exact match") - } -} - -func TestSplit(t *testing.T) { - splitter := map[string]int{ - "www.miek.nl.": 3, - "www.miek.nl": 3, - "www..miek.nl": 4, - `www\.miek.nl.`: 2, - `www\\.miek.nl.`: 3, - ".": 0, - "nl.": 1, - "nl": 1, - "com.": 1, - ".com.": 2, - } - for s, i := range splitter { - if x := len(Split(s)); x != i { - t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s)) - } else { - t.Logf("%s %v", s, Split(s)) - } - } -} - -func TestSplit2(t *testing.T) { - splitter := map[string][]int{ - "www.miek.nl.": {0, 4, 9}, - "www.miek.nl": {0, 4, 9}, - "nl": {0}, - } - for s, i := range splitter { - x := Split(s) - switch len(i) { - case 1: - if x[0] != i[0] { - t.Errorf("labels should be %v, got %v: %s", i, x, s) - } - default: - if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] { - t.Errorf("labels should be %v, got %v: %s", i, x, s) - } - } - } -} - -func TestPrevLabel(t *testing.T) { - type prev struct { - string - int - } - prever := map[prev]int{ - prev{"www.miek.nl.", 0}: 12, - prev{"www.miek.nl.", 1}: 9, - prev{"www.miek.nl.", 2}: 4, - - prev{"www.miek.nl", 0}: 11, - prev{"www.miek.nl", 1}: 9, - prev{"www.miek.nl", 2}: 4, - - prev{"www.miek.nl.", 5}: 0, - prev{"www.miek.nl", 5}: 0, - - prev{"www.miek.nl.", 3}: 0, - prev{"www.miek.nl", 3}: 0, - } - for s, i := range prever { - x, ok := PrevLabel(s.string, s.int) - if i != x { - t.Errorf("label should be %d, got %d, %t: preving %d, %s", i, x, ok, s.int, s.string) - } - } -} - -func TestCountLabel(t *testing.T) { - splitter := map[string]int{ - "www.miek.nl.": 3, - "www.miek.nl": 3, - "nl": 1, - ".": 0, - } - for s, i := range splitter { - x := CountLabel(s) - if x != i { - t.Errorf("CountLabel should have %d, got %d", i, x) - } - } -} - -func TestSplitDomainName(t *testing.T) { - labels := map[string][]string{ - "miek.nl": {"miek", "nl"}, - ".": nil, - "www.miek.nl.": {"www", "miek", "nl"}, - "www.miek.nl": {"www", "miek", "nl"}, - "www..miek.nl": {"www", "", "miek", "nl"}, - `www\.miek.nl`: {`www\.miek`, "nl"}, - `www\\.miek.nl`: {`www\\`, "miek", "nl"}, - ".www.miek.nl.": {"", "www", "miek", "nl"}, - } -domainLoop: - for domain, splits := range labels { - parts := SplitDomainName(domain) - if len(parts) != len(splits) { - t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) - continue domainLoop - } - for i := range parts { - if parts[i] != splits[i] { - t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) - continue domainLoop - } - } - } -} - -func TestIsDomainName(t *testing.T) { - type ret struct { - ok bool - lab int - } - names := map[string]*ret{ - "..": {false, 1}, - "@.": {true, 1}, - "www.example.com": {true, 3}, - "www.e%ample.com": {true, 3}, - 
"www.example.com.": {true, 3}, - "mi\\k.nl.": {true, 2}, - "mi\\k.nl": {true, 2}, - } - for d, ok := range names { - l, k := IsDomainName(d) - if ok.ok != k || ok.lab != l { - t.Errorf(" got %v %d for %s ", k, l, d) - t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d) - } - } -} - -func BenchmarkSplitLabels(b *testing.B) { - for i := 0; i < b.N; i++ { - Split("www.example.com") - } -} - -func BenchmarkLenLabels(b *testing.B) { - for i := 0; i < b.N; i++ { - CountLabel("www.example.com") - } -} - -func BenchmarkCompareLabels(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - CompareDomainName("www.example.com", "aa.example.com") - } -} - -func BenchmarkIsSubDomain(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - IsSubDomain("www.example.com", "aa.example.com") - IsSubDomain("example.com", "aa.example.com") - IsSubDomain("miek.nl", "aa.example.com") - } -} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 4d9f81d4..00000000 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,349 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from msg_generate.go - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) - fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) -if err != nil { - return off, err -} -headerEnd := off -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): - // directly write instead of using o() so we get the error check in the correct place - field := st.Field(i).Name() - fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. 
empty -if rr.%s != "-" { - off, err = packStringHex(rr.%s, msg, off) - if err != nil { - return off, err - } -} -`, field, field) - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - - case st.Tag(i) == `dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - // We have packed everything, only now we know the rdlength of this RR - fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) - fmt.Fprintf(b, "rr := new(%s)\n", name) - fmt.Fprint(b, "rr.Hdr = h\n") - fmt.Fprint(b, `if noRdata(h) { -return rr, off, nil - } -var err error -rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. 
- if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return rr, off, nil - } -`) - } - } - fmt.Fprintf(b, "return rr, off, err }\n\n") - } - // Generate typeToUnpack map - fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") - for _, name := range namedTypes { - if name == "RFC3597" { - continue - } - fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) - } - fmt.Fprintln(b, "}\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
-func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. -func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/msg_test.go b/vendor/github.com/miekg/dns/msg_test.go deleted file mode 100644 index 2dbef626..00000000 --- a/vendor/github.com/miekg/dns/msg_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package dns - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "testing" -) - -const ( - maxPrintableLabel = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789x" - tooLongLabel = maxPrintableLabel + "x" -) - -var ( - longDomain = maxPrintableLabel[:53] + strings.TrimSuffix( - strings.Join([]string{".", ".", ".", ".", "."}, maxPrintableLabel[:49]), ".") - reChar = regexp.MustCompile(`.`) - i = -1 - maxUnprintableLabel = reChar.ReplaceAllStringFunc(maxPrintableLabel, func(ch string) string { - if i++; i >= 32 { - i = 0 - } - return fmt.Sprintf("\\%03d", i) - }) -) - -func TestUnpackDomainName(t *testing.T) { - var cases = []struct { - label string - input string - expectedOutput string - expectedError string - }{ - {"empty domain", - "\x00", - ".", - ""}, - {"long label", - string(63) + maxPrintableLabel + "\x00", - maxPrintableLabel + ".", - ""}, - {"unprintable label", - string(63) + regexp.MustCompile(`\\[0-9]+`).ReplaceAllStringFunc(maxUnprintableLabel, - func(escape string) string { - n, _ := strconv.ParseInt(escape[1:], 10, 8) - return string(n) - }) + "\x00", - maxUnprintableLabel + ".", - ""}, - {"long domain", - string(53) + strings.Replace(longDomain, ".", string(49), -1) + "\x00", - longDomain + ".", - ""}, - {"compression pointer", - // an unrealistic but functional test referencing an offset _inside_ a label - "\x03foo" + "\x05\x03com\x00" + "\x07example" + "\xC0\x05", - "foo.\\003com\\000.example.com.", - ""}, - - {"too long domain", - string(54) + "x" + strings.Replace(longDomain, ".", string(49), -1) + "\x00", - "x" + longDomain + ".", - ErrLongDomain.Error()}, - {"too long by pointer", - // a matryoshka doll name to get over 255 octets after expansion via internal pointers - string([]byte{ - // 11 length values, first to last - 40, 37, 34, 31, 28, 25, 22, 19, 16, 13, 0, - // 12 filler values - 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, - // 10 pointers, last to first - 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, - }), - "", - ErrLongDomain.Error()}, - {"long by pointer", - // a matryoshka doll name _not_ exceeding 255 octets after expansion - string([]byte{ - // 11 length values, first to last - 37, 34, 31, 28, 25, 22, 19, 16, 13, 10, 0, - // 9 filler values - 120, 120, 120, 120, 120, 120, 120, 120, 120, - // 10 pointers, last to first - 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, - }), - "" + - (`\"\031\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003\192\002.`) + - (`\031\028\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003.`) + - (`\028\025\022\019\016\013\010\000xxxxxxxxx` + - 
`\192\010\192\009\192\008\192\007\192\006\192\005\192\004.`) + - (`\025\022\019\016\013\010\000xxxxxxxxx` + - `\192\010\192\009\192\008\192\007\192\006\192\005.`) + - `\022\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007\192\006.` + - `\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007.` + - `\016\013\010\000xxxxxxxxx\192\010\192\009\192\008.` + - `\013\010\000xxxxxxxxx\192\010\192\009.` + - `\010\000xxxxxxxxx\192\010.` + - `\000xxxxxxxxx.`, - ""}, - {"truncated name", "\x07example\x03", "", "dns: buffer size too small"}, - {"non-absolute name", "\x07example\x03com", "", "dns: buffer size too small"}, - {"compression pointer cycle", - "\x03foo" + "\x03bar" + "\x07example" + "\xC0\x04", - "", - "dns: too many compression pointers"}, - {"reserved compression pointer 0b10", "\x07example\x80", "", "dns: bad rdata"}, - {"reserved compression pointer 0b01", "\x07example\x40", "", "dns: bad rdata"}, - } - for _, test := range cases { - output, idx, err := UnpackDomainName([]byte(test.input), 0) - if test.expectedOutput != "" && output != test.expectedOutput { - t.Errorf("%s: expected %s, got %s", test.label, test.expectedOutput, output) - } - if test.expectedError == "" && err != nil { - t.Errorf("%s: expected no error, got %d %v", test.label, idx, err) - } else if test.expectedError != "" && (err == nil || err.Error() != test.expectedError) { - t.Errorf("%s: expected error %s, got %d %v", test.label, test.expectedError, idx, err) - } - } -} diff --git a/vendor/github.com/miekg/dns/nsecx_test.go b/vendor/github.com/miekg/dns/nsecx_test.go deleted file mode 100644 index 8d5f7179..00000000 --- a/vendor/github.com/miekg/dns/nsecx_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package dns - -import "testing" - -func TestPackNsec3(t *testing.T) { - nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD") - if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" { - t.Error(nsec3) - } - - nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD") - if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" { - t.Error(nsec3) - } -} - -func TestNsec3(t *testing.T) { - nsec3, _ := NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") - if !nsec3.(*NSEC3).Match("nl.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - if !nsec3.(*NSEC3).Match("NL.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.NL. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - if nsec3.(*NSEC3).Match("com.") { // - t.Fatal("com. is not in the zone nl.") - } - if nsec3.(*NSEC3).Match("test.nl.") { // name hash = gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q - t.Fatal("gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q.nl. should not match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } - nsec3, _ = NewRR("nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") - if nsec3.(*NSEC3).Match("nl.") { - t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. 
should not match a record without a owner hash") - } - - for _, tc := range []struct { - rr *NSEC3 - name string - covers bool - }{ - // positive tests - { // name hash between owner hash and next hash - rr: &NSEC3{ - Hdr: RR_Header{Name: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", - }, - name: "bsd.com.", - covers: true, - }, - { // end of zone, name hash is after owner hash - rr: &NSEC3{ - Hdr: RR_Header{Name: "3v62ulr0nre83v0rja2vjgtlif9v6rab.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", - }, - name: "csd.com.", - covers: true, - }, - { // end of zone, name hash is before beginning of zone - rr: &NSEC3{ - Hdr: RR_Header{Name: "PT3RON8N7PM3A0OE989IB84OOSADP7O8.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB", - }, - name: "asd.com.", - covers: true, - }, - // negative tests - { // too short owner name - rr: &NSEC3{ - Hdr: RR_Header{Name: "nl."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", - }, - name: "asd.com.", - covers: false, - }, - { // outside of zone - rr: &NSEC3{ - Hdr: RR_Header{Name: "39p91242oslggest5e6a7cci4iaeqvnk.nl."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", - }, - name: "asd.com.", - covers: false, - }, - { // empty interval - rr: &NSEC3{ - Hdr: RR_Header{Name: "2n1tb3vairuobl6rkdvii42n9tfmialp.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", - }, - name: "asd.com.", - covers: false, - }, - { // name hash is before owner hash, not covered - rr: &NSEC3{ - Hdr: RR_Header{Name: "3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB.com."}, - Hash: 1, - Flags: 1, - Iterations: 5, - Salt: "F10E9F7EA83FC8F3", - NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", - }, - name: "asd.com.", - covers: false, - }, - } { - covers := tc.rr.Cover(tc.name) - if tc.covers != covers { - t.Fatalf("Cover failed for %s: expected %t, got %t [record: %s]", tc.name, tc.covers, covers, tc.rr) - } - } -} diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go deleted file mode 100644 index fc5bdaf5..00000000 --- a/vendor/github.com/miekg/dns/parse_test.go +++ /dev/null @@ -1,1540 +0,0 @@ -package dns - -import ( - "bytes" - "crypto/rsa" - "encoding/hex" - "fmt" - "math/rand" - "net" - "reflect" - "strconv" - "strings" - "testing" - "testing/quick" - "time" -) - -func TestDotInName(t *testing.T) { - buf := make([]byte, 20) - PackDomainName("aa\\.bb.nl.", buf, 0, nil, false) - // index 3 must be a real dot - if buf[3] != '.' { - t.Error("dot should be a real dot") - } - - if buf[6] != 2 { - t.Error("this must have the value 2") - } - dom, _, _ := UnpackDomainName(buf, 0) - // printing it should yield the backspace again - if dom != "aa\\.bb.nl." { - t.Error("dot should have been escaped: ", dom) - } -} - -func TestDotLastInLabel(t *testing.T) { - sample := "aa\\..au." 
- buf := make([]byte, 20) - _, err := PackDomainName(sample, buf, 0, nil, false) - if err != nil { - t.Fatalf("unexpected error packing domain: %v", err) - } - dom, _, _ := UnpackDomainName(buf, 0) - if dom != sample { - t.Fatalf("unpacked domain `%s' doesn't match packed domain", dom) - } -} - -func TestTooLongDomainName(t *testing.T) { - l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt." - dom := l + l + l + l + l + l + l - _, err := NewRR(dom + " IN A 127.0.0.1") - if err == nil { - t.Error("should be too long") - } else { - t.Logf("error is %v", err) - } - _, err = NewRR("..com. IN A 127.0.0.1") - if err == nil { - t.Error("should fail") - } else { - t.Logf("error is %v", err) - } -} - -func TestDomainName(t *testing.T) { - tests := []string{"r\\.gieben.miek.nl.", "www\\.www.miek.nl.", - "www.*.miek.nl.", "www.*.miek.nl.", - } - dbuff := make([]byte, 40) - - for _, ts := range tests { - if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil { - t.Error("not a valid domain name") - continue - } - n, _, err := UnpackDomainName(dbuff, 0) - if err != nil { - t.Error("failed to unpack packed domain name") - continue - } - if ts != n { - t.Errorf("must be equal: in: %s, out: %s", ts, n) - } - } -} - -func TestDomainNameAndTXTEscapes(t *testing.T) { - tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', 9, 13, 10, 0, 255} - for _, b := range tests { - rrbytes := []byte{ - 1, b, 0, // owner - byte(TypeTXT >> 8), byte(TypeTXT), - byte(ClassINET >> 8), byte(ClassINET), - 0, 0, 0, 1, // TTL - 0, 2, 1, b, // Data - } - rr1, _, err := UnpackRR(rrbytes, 0) - if err != nil { - panic(err) - } - s := rr1.String() - rr2, err := NewRR(s) - if err != nil { - t.Errorf("Error parsing unpacked RR's string: %v", err) - t.Errorf(" Bytes: %v", rrbytes) - t.Errorf("String: %v", s) - } - repacked := make([]byte, len(rrbytes)) - if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil { - t.Errorf("error packing parsed RR: %v", err) - t.Errorf(" original Bytes: %v", rrbytes) - t.Errorf("unpacked Struct: %v", rr1) - t.Errorf(" parsed Struct: %v", rr2) - } - if !bytes.Equal(repacked, rrbytes) { - t.Error("packed bytes don't match original bytes") - t.Errorf(" original bytes: %v", rrbytes) - t.Errorf(" packed bytes: %v", repacked) - t.Errorf("unpacked struct: %v", rr1) - t.Errorf(" parsed struct: %v", rr2) - } - } -} - -func TestTXTEscapeParsing(t *testing.T) { - test := [][]string{ - {`";"`, `";"`}, - {`\;`, `";"`}, - {`"\t"`, `"t"`}, - {`"\r"`, `"r"`}, - {`"\ "`, `" "`}, - {`"\;"`, `";"`}, - {`"\;\""`, `";\""`}, - {`"\(a\)"`, `"(a)"`}, - {`"\(a)"`, `"(a)"`}, - {`"(a\)"`, `"(a)"`}, - {`"(a)"`, `"(a)"`}, - {`"\048"`, `"0"`}, - {`"\` + "\t" + `"`, `"\009"`}, - {`"\` + "\n" + `"`, `"\010"`}, - {`"\` + "\r" + `"`, `"\013"`}, - {`"\` + "\x11" + `"`, `"\017"`}, - {`"\'"`, `"'"`}, - } - for _, s := range test { - rr, err := NewRR(fmt.Sprintf("example.com. 
IN TXT %v", s[0])) - if err != nil { - t.Errorf("could not parse %v TXT: %s", s[0], err) - continue - } - - txt := sprintTxt(rr.(*TXT).Txt) - if txt != s[1] { - t.Errorf("mismatch after parsing `%v` TXT record: `%v` != `%v`", s[0], txt, s[1]) - } - } -} - -func GenerateDomain(r *rand.Rand, size int) []byte { - dnLen := size % 70 // artificially limit size so there's less to intrepret if a failure occurs - var dn []byte - done := false - for i := 0; i < dnLen && !done; { - max := dnLen - i - if max > 63 { - max = 63 - } - lLen := max - if lLen != 0 { - lLen = int(r.Int31()) % max - } - done = lLen == 0 - if done { - continue - } - l := make([]byte, lLen+1) - l[0] = byte(lLen) - for j := 0; j < lLen; j++ { - l[j+1] = byte(rand.Int31()) - } - dn = append(dn, l...) - i += 1 + lLen - } - return append(dn, 0) -} - -func TestDomainQuick(t *testing.T) { - r := rand.New(rand.NewSource(0)) - f := func(l int) bool { - db := GenerateDomain(r, l) - ds, _, err := UnpackDomainName(db, 0) - if err != nil { - panic(err) - } - buf := make([]byte, 255) - off, err := PackDomainName(ds, buf, 0, nil, false) - if err != nil { - t.Errorf("error packing domain: %v", err) - t.Errorf(" bytes: %v", db) - t.Errorf("string: %v", ds) - return false - } - if !bytes.Equal(db, buf[:off]) { - t.Errorf("repacked domain doesn't match original:") - t.Errorf("src bytes: %v", db) - t.Errorf(" string: %v", ds) - t.Errorf("out bytes: %v", buf[:off]) - return false - } - return true - } - if err := quick.Check(f, nil); err != nil { - t.Error(err) - } -} - -func GenerateTXT(r *rand.Rand, size int) []byte { - rdLen := size % 300 // artificially limit size so there's less to intrepret if a failure occurs - var rd []byte - for i := 0; i < rdLen; { - max := rdLen - 1 - if max > 255 { - max = 255 - } - sLen := max - if max != 0 { - sLen = int(r.Int31()) % max - } - s := make([]byte, sLen+1) - s[0] = byte(sLen) - for j := 0; j < sLen; j++ { - s[j+1] = byte(rand.Int31()) - } - rd = append(rd, s...) - i += 1 + sLen - } - return rd -} - -// Ok, 2 things. 1) this test breaks with the new functionality of splitting up larger txt -// chunks into 255 byte pieces. 2) I don't like the random nature of this thing, because I can't -// place the quotes where they need to be. -// So either add some code the places the quotes in just the right spots, make this non random -// or do something else. -// Disabled for now. (miek) -func testTXTRRQuick(t *testing.T) { - s := rand.NewSource(0) - r := rand.New(s) - typeAndClass := []byte{ - byte(TypeTXT >> 8), byte(TypeTXT), - byte(ClassINET >> 8), byte(ClassINET), - 0, 0, 0, 1, // TTL - } - f := func(l int) bool { - owner := GenerateDomain(r, l) - rdata := GenerateTXT(r, l) - rrbytes := make([]byte, 0, len(owner)+2+2+4+2+len(rdata)) - rrbytes = append(rrbytes, owner...) - rrbytes = append(rrbytes, typeAndClass...) - rrbytes = append(rrbytes, byte(len(rdata)>>8)) - rrbytes = append(rrbytes, byte(len(rdata))) - rrbytes = append(rrbytes, rdata...) 
- rr, _, err := UnpackRR(rrbytes, 0) - if err != nil { - panic(err) - } - buf := make([]byte, len(rrbytes)*3) - off, err := PackRR(rr, buf, 0, nil, false) - if err != nil { - t.Errorf("pack Error: %v\nRR: %v", err, rr) - return false - } - buf = buf[:off] - if !bytes.Equal(buf, rrbytes) { - t.Errorf("packed bytes don't match original bytes") - t.Errorf("src bytes: %v", rrbytes) - t.Errorf(" struct: %v", rr) - t.Errorf("out bytes: %v", buf) - return false - } - if len(rdata) == 0 { - // string'ing won't produce any data to parse - return true - } - rrString := rr.String() - rr2, err := NewRR(rrString) - if err != nil { - t.Errorf("error parsing own output: %v", err) - t.Errorf("struct: %v", rr) - t.Errorf("string: %v", rrString) - return false - } - if rr2.String() != rrString { - t.Errorf("parsed rr.String() doesn't match original string") - t.Errorf("original: %v", rrString) - t.Errorf(" parsed: %v", rr2.String()) - return false - } - - buf = make([]byte, len(rrbytes)*3) - off, err = PackRR(rr2, buf, 0, nil, false) - if err != nil { - t.Errorf("error packing parsed rr: %v", err) - t.Errorf("unpacked Struct: %v", rr) - t.Errorf(" string: %v", rrString) - t.Errorf(" parsed Struct: %v", rr2) - return false - } - buf = buf[:off] - if !bytes.Equal(buf, rrbytes) { - t.Errorf("parsed packed bytes don't match original bytes") - t.Errorf(" source bytes: %v", rrbytes) - t.Errorf("unpacked struct: %v", rr) - t.Errorf(" string: %v", rrString) - t.Errorf(" parsed struct: %v", rr2) - t.Errorf(" repacked bytes: %v", buf) - return false - } - return true - } - c := &quick.Config{MaxCountScale: 10} - if err := quick.Check(f, c); err != nil { - t.Error(err) - } -} - -func TestParseDirectiveMisc(t *testing.T) { - tests := map[string]string{ - "$ORIGIN miek.nl.\na IN NS b": "a.miek.nl.\t3600\tIN\tNS\tb.miek.nl.", - "$TTL 2H\nmiek.nl. IN NS b.": "miek.nl.\t7200\tIN\tNS\tb.", - "miek.nl. 1D IN NS b.": "miek.nl.\t86400\tIN\tNS\tb.", - `name. IN SOA a6.nstld.com. hostmaster.nic.name. ( - 203362132 ; serial - 5m ; refresh (5 minutes) - 5m ; retry (5 minutes) - 2w ; expire (2 weeks) - 300 ; minimum (5 minutes) -)`: "name.\t3600\tIN\tSOA\ta6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300", - ". 3600000 IN NS ONE.MY-ROOTS.NET.": ".\t3600000\tIN\tNS\tONE.MY-ROOTS.NET.", - "ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestNSEC(t *testing.T) { - nsectests := map[string]string{ - "nl. IN NSEC3PARAM 1 0 5 30923C44C6CBBB8F": "nl.\t3600\tIN\tNSEC3PARAM\t1 0 5 30923C44C6CBBB8F", - "p2209hipbpnm681knjnu0m1febshlv4e.nl. IN NSEC3 1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM": "p2209hipbpnm681knjnu0m1febshlv4e.nl.\t3600\tIN\tNSEC3\t1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. 
A RRSIG NSEC TYPE65534", - "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test. NSEC3 1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA": "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test.\t3600\tIN\tNSEC3\t1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA", - } - for i, o := range nsectests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseLOC(t *testing.T) { - lt := map[string]string{ - "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", - "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseDS(t *testing.T) { - dt := map[string]string{ - "example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestQuotes(t *testing.T) { - tests := map[string]string{ - `t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"", - `t.example.com. IN TXT "a - bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\010 bc\"", - `t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"", - `t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"", - `t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"", - `t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - `t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", - `t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", - `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", - `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", - `t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.", - "cid.urn.arpa. 
NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseClass(t *testing.T) { - tests := map[string]string{ - "t.example.com. IN A 127.0.0.1": "t.example.com. 3600 IN A 127.0.0.1", - "t.example.com. CS A 127.0.0.1": "t.example.com. 3600 CS A 127.0.0.1", - "t.example.com. CH A 127.0.0.1": "t.example.com. 3600 CH A 127.0.0.1", - // ClassANY can not occur in zone files - // "t.example.com. ANY A 127.0.0.1": "t.example.com. 3600 ANY A 127.0.0.1", - "t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestBrace(t *testing.T) { - tests := map[string]string{ - "(miek.nl.) 3600 IN A 127.0.1.1": "miek.nl.\t3600\tIN\tA\t127.0.1.1", - "miek.nl. (3600) IN MX (10) elektron.atoom.net.": "miek.nl.\t3600\tIN\tMX\t10 elektron.atoom.net.", - `miek.nl. IN ( - 3600 A 127.0.0.1)`: "miek.nl.\t3600\tIN\tA\t127.0.0.1", - "(miek.nl.) (A) (127.0.2.1)": "miek.nl.\t3600\tIN\tA\t127.0.2.1", - "miek.nl A 127.0.3.1": "miek.nl.\t3600\tIN\tA\t127.0.3.1", - "_ssh._tcp.local. 60 IN (PTR) stora._ssh._tcp.local.": "_ssh._tcp.local.\t60\tIN\tPTR\tstora._ssh._tcp.local.", - "miek.nl. NS ns.miek.nl": "miek.nl.\t3600\tIN\tNS\tns.miek.nl.", - `(miek.nl.) ( - (IN) - (AAAA) - (::1) )`: "miek.nl.\t3600\tIN\tAAAA\t::1", - `(miek.nl.) ( - (IN) - (AAAA) - (::1))`: "miek.nl.\t3600\tIN\tAAAA\t::1", - "miek.nl. IN AAAA ::2": "miek.nl.\t3600\tIN\tAAAA\t::2", - `((m)(i)ek.(n)l.) (SOA) (soa.) (soa.) ( - 2009032802 ; serial - 21600 ; refresh (6 hours) - 7(2)00 ; retry (2 hours) - 604()800 ; expire (1 week) - 3600 ; minimum (1 hour) - )`: "miek.nl.\t3600\tIN\tSOA\tsoa. soa. 2009032802 21600 7200 604800 3600", - "miek\\.nl. IN A 127.0.0.10": "miek\\.nl.\t3600\tIN\tA\t127.0.0.10", - "miek.nl. IN A 127.0.0.11": "miek.nl.\t3600\tIN\tA\t127.0.0.11", - "miek.nl. A 127.0.0.12": "miek.nl.\t3600\tIN\tA\t127.0.0.12", - `miek.nl. 86400 IN SOA elektron.atoom.net. miekg.atoom.net. ( - 2009032802 ; serial - 21600 ; refresh (6 hours) - 7200 ; retry (2 hours) - 604800 ; expire (1 week) - 3600 ; minimum (1 hour) - )`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse RR: %v\n\t%s", err, i) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseFailure(t *testing.T) { - tests := []string{"miek.nl. IN A 327.0.0.1", - "miek.nl. IN AAAA ::x", - "miek.nl. IN MX a0 miek.nl.", - "miek.nl aap IN MX mx.miek.nl.", - "miek.nl 200 IN mxx 10 mx.miek.nl.", - "miek.nl. inn MX 10 mx.miek.nl.", - // "miek.nl. IN CNAME ", // actually valid nowadays, zero size rdata - "miek.nl. IN CNAME ..", - "miek.nl. PA MX 10 miek.nl.", - "miek.nl. 
) IN MX 10 miek.nl.", - } - - for _, s := range tests { - _, err := NewRR(s) - if err == nil { - t.Errorf("should have triggered an error: \"%s\"", s) - } - } -} - -func TestZoneParsing(t *testing.T) { - // parse_test.db - db := ` -a.example.com. IN A 127.0.0.1 -8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG -$ORIGIN a.example.com. -test IN A 127.0.0.1 - IN SSHFP 1 2 ( - BC6533CDC95A79078A39A56EA7635984ED655318ADA9 - B6159E30723665DA95BB ) -$ORIGIN b.example.com. -test IN CNAME test.a.example.com. -` - start := time.Now().UnixNano() - to := ParseZone(strings.NewReader(db), "", "parse_test.db") - var i int - for x := range to { - i++ - if x.Error != nil { - t.Error(x.Error) - continue - } - t.Log(x.RR) - } - delta := time.Now().UnixNano() - start - t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9)) -} - -func ExampleParseZone() { - zone := `$ORIGIN . -$TTL 3600 ; 1 hour -name IN SOA a6.nstld.com. hostmaster.nic.name. ( - 203362132 ; serial - 300 ; refresh (5 minutes) - 300 ; retry (5 minutes) - 1209600 ; expire (2 weeks) - 300 ; minimum (5 minutes) - ) -$TTL 10800 ; 3 hours -name. 10800 IN NS name. - IN NS g6.nstld.com. - 7200 NS h6.nstld.com. - 3600 IN NS j6.nstld.com. - IN 3600 NS k6.nstld.com. - NS l6.nstld.com. - NS a6.nstld.com. - NS c6.nstld.com. - NS d6.nstld.com. - NS f6.nstld.com. - NS m6.nstld.com. -( - NS m7.nstld.com. -) -$ORIGIN name. -0-0onlus NS ns7.ehiweb.it. - NS ns8.ehiweb.it. -0-g MX 10 mx01.nic - MX 10 mx02.nic - MX 10 mx03.nic - MX 10 mx04.nic -$ORIGIN 0-g.name -moutamassey NS ns01.yahoodomains.jp. - NS ns02.yahoodomains.jp. -` - to := ParseZone(strings.NewReader(zone), "", "testzone") - for x := range to { - fmt.Println(x.RR) - } - // Output: - // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300 - // name. 10800 IN NS name. - // name. 10800 IN NS g6.nstld.com. - // name. 7200 IN NS h6.nstld.com. - // name. 3600 IN NS j6.nstld.com. - // name. 3600 IN NS k6.nstld.com. - // name. 10800 IN NS l6.nstld.com. - // name. 10800 IN NS a6.nstld.com. - // name. 10800 IN NS c6.nstld.com. - // name. 10800 IN NS d6.nstld.com. - // name. 10800 IN NS f6.nstld.com. - // name. 10800 IN NS m6.nstld.com. - // name. 10800 IN NS m7.nstld.com. - // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it. - // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it. - // 0-g.name. 10800 IN MX 10 mx01.nic.name. - // 0-g.name. 10800 IN MX 10 mx02.nic.name. - // 0-g.name. 10800 IN MX 10 mx03.nic.name. - // 0-g.name. 10800 IN MX 10 mx04.nic.name. - // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp. - // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp. -} - -func ExampleHIP() { - h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 - AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p -9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ -b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D - rvs.example.com. )` - if hip, err := NewRR(h); err == nil { - fmt.Println(hip.String()) - } - // Output: - // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. -} - -func TestHIP(t *testing.T) { - h := `www.example.com. 
IN HIP ( 2 200100107B1A74DF365639CC39F1D578 - AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p -9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ -b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D - rvs1.example.com. - rvs2.example.com. )` - rr, err := NewRR(h) - if err != nil { - t.Fatalf("failed to parse RR: %v", err) - } - t.Logf("RR: %s", rr) - msg := new(Msg) - msg.Answer = []RR{rr, rr} - bytes, err := msg.Pack() - if err != nil { - t.Fatalf("failed to pack msg: %v", err) - } - if err := msg.Unpack(bytes); err != nil { - t.Fatalf("failed to unpack msg: %v", err) - } - if len(msg.Answer) != 2 { - t.Fatalf("2 answers expected: %v", msg) - } - for i, rr := range msg.Answer { - rr := rr.(*HIP) - t.Logf("RR: %s", rr) - if l := len(rr.RendezvousServers); l != 2 { - t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg) - } - for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} { - if rr.RendezvousServers[j] != s { - t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg) - } - } - } -} - -func ExampleSOA() { - s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100" - if soa, err := NewRR(s); err == nil { - fmt.Println(soa.String()) - } - // Output: - // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100 -} - -func TestLineNumberError(t *testing.T) { - s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100" - if _, err := NewRR(s); err != nil { - if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" { - t.Error("not expecting this error: ", err) - } - } -} - -// Test with no known RR on the line -func TestLineNumberError2(t *testing.T) { - tests := map[string]string{ - "example.com. 1000 SO master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100": "dns: expecting RR type or class, not this...: \"SO\" at line: 1:21", - "example.com 1000 IN TALINK a.example.com. b..example.com.": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:57", - "example.com 1000 IN TALINK ( a.example.com. b..example.com. )": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:60", - `example.com 1000 IN TALINK ( a.example.com. - bb..example.com. )`: "dns: bad TALINK NextName: \"bb..example.com.\" at line: 2:18", - // This is a bug, it should report an error on line 1, but the new is already processed. - `example.com 1000 IN TALINK ( a.example.com. b...example.com. - )`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"} - - for in, errStr := range tests { - _, err := NewRR(in) - if err == nil { - t.Error("err is nil") - } else { - if err.Error() != errStr { - t.Errorf("%s: error should be %s is %v", in, errStr, err) - } - } - } -} - -// Test if the calculations are correct -func TestRfc1982(t *testing.T) { - // If the current time and the timestamp are more than 68 years apart - // it means the date has wrapped. 
0 is 1970 - - // fall in the current 68 year span - strtests := []string{"20120525134203", "19700101000000", "20380119031408"} - for _, v := range strtests { - if x, _ := StringToTime(v); v != TimeToString(x) { - t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x) - } - } - - inttests := map[uint32]string{0: "19700101000000", - 1 << 31: "20380119031408", - 1<<32 - 1: "21060207062815", - } - for i, v := range inttests { - if TimeToString(i) != v { - t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i)) - } - } - - // Future tests, these dates get parsed to a date within the current 136 year span - future := map[string]string{"22680119031408": "20631123173144", - "19010101121212": "20370206184028", - "19210101121212": "20570206184028", - "19500101121212": "20860206184028", - "19700101000000": "19700101000000", - "19690101000000": "21050207062816", - "29210101121212": "21040522212236", - } - for from, to := range future { - x, _ := StringToTime(from) - y := TimeToString(x) - if y != to { - t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y) - } - } -} - -func TestEmpty(t *testing.T) { - for range ParseZone(strings.NewReader(""), "", "") { - t.Errorf("should be empty") - } -} - -func TestLowercaseTokens(t *testing.T) { - var testrecords = []string{ - "example.org. 300 IN a 1.2.3.4", - "example.org. 300 in A 1.2.3.4", - "example.org. 300 in a 1.2.3.4", - "example.org. 300 a 1.2.3.4", - "example.org. 300 A 1.2.3.4", - "example.org. IN a 1.2.3.4", - "example.org. in A 1.2.3.4", - "example.org. in a 1.2.3.4", - "example.org. a 1.2.3.4", - "example.org. A 1.2.3.4", - "example.org. a 1.2.3.4", - "$ORIGIN example.org.\n a 1.2.3.4", - "$Origin example.org.\n a 1.2.3.4", - "$origin example.org.\n a 1.2.3.4", - "example.org. Class1 Type1 1.2.3.4", - } - for _, testrr := range testrecords { - _, err := NewRR(testrr) - if err != nil { - t.Errorf("failed to parse %#v, got %v", testrr, err) - } - } -} - -func ExampleParseZone_generate() { - // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761 - zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0" - to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "") - for x := range to { - if x.Error == nil { - fmt.Println(x.RR.String()) - } - } - // Output: - // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE. - // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE. - // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA. - // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA. - // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA. - // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA. - // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA. - // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA. - // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA. - // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA. 
-} - -func TestSRVPacking(t *testing.T) { - msg := Msg{} - - things := []string{"1.2.3.4:8484", - "45.45.45.45:8484", - "84.84.84.84:8484", - } - - for i, n := range things { - h, p, err := net.SplitHostPort(n) - if err != nil { - continue - } - port, _ := strconv.ParseUint(p, 10, 16) - - rr := &SRV{ - Hdr: RR_Header{Name: "somename.", - Rrtype: TypeSRV, - Class: ClassINET, - Ttl: 5}, - Priority: uint16(i), - Weight: 5, - Port: uint16(port), - Target: h + ".", - } - - msg.Answer = append(msg.Answer, rr) - } - - _, err := msg.Pack() - if err != nil { - t.Fatalf("couldn't pack %v: %v", msg, err) - } -} - -func TestParseBackslash(t *testing.T) { - if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil { - t.Errorf("could not create RR with \\000 in it") - } else { - t.Logf("parsed %s", r.String()) - } - if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil { - t.Errorf("could not create RR with \\000 in it") - } else { - t.Logf("parsed %s", r.String()) - } - if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil { - t.Errorf("could not create RR with \\ and \\@ in it") - } else { - t.Logf("parsed %s", r.String()) - } -} - -func TestILNP(t *testing.T) { - tests := []string{ - "host1.example.com.\t3600\tIN\tNID\t10 0014:4fff:ff20:ee64", - "host1.example.com.\t3600\tIN\tNID\t20 0015:5fff:ff21:ee65", - "host2.example.com.\t3600\tIN\tNID\t10 0016:6fff:ff22:ee66", - "host1.example.com.\t3600\tIN\tL32\t10 10.1.2.0", - "host1.example.com.\t3600\tIN\tL32\t20 10.1.4.0", - "host2.example.com.\t3600\tIN\tL32\t10 10.1.8.0", - "host1.example.com.\t3600\tIN\tL64\t10 2001:0DB8:1140:1000", - "host1.example.com.\t3600\tIN\tL64\t20 2001:0DB8:2140:2000", - "host2.example.com.\t3600\tIN\tL64\t10 2001:0DB8:4140:4000", - "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet1.example.com.", - "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet2.example.com.", - "host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.", - } - for _, t1 := range tests { - r, err := NewRR(t1) - if err != nil { - t.Fatalf("an error occurred: %v", err) - } else { - if t1 != r.String() { - t.Fatalf("strings should be equal %s %s", t1, r.String()) - } - } - } -} - -func TestGposEidNimloc(t *testing.T) { - dt := map[string]string{ - "444433332222111199990123000000ff. NSAP-PTR foo.bar.com.": "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.", - "lillee. IN GPOS -32.6882 116.8652 10.0": "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0", - "hinault. IN GPOS -22.6882 116.8652 250.0": "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0", - "VENERA. IN NIMLOC 75234159EAC457800920": "VENERA.\t3600\tIN\tNIMLOC\t75234159EAC457800920", - "VAXA. IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestPX(t *testing.T) { - dt := map[string]string{ - "*.net2.it. IN PX 10 net2.it. PRMD-net2.ADMD-p400.C-it.": "*.net2.it.\t3600\tIN\tPX\t10 net2.it. PRMD-net2.ADMD-p400.C-it.", - "ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. 
O-ab.PRMD-net2.ADMDb.C-it.", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestComment(t *testing.T) { - // Comments we must see - comments := map[string]bool{"; this is comment 1": true, - "; this is comment 4": true, "; this is comment 6": true, - "; this is comment 7": true, "; this is comment 8": true} - zone := ` -foo. IN A 10.0.0.1 ; this is comment 1 -foo. IN A ( - 10.0.0.2 ; this is comment2 -) -; this is comment3 -foo. IN A 10.0.0.3 -foo. IN A ( 10.0.0.4 ); this is comment 4 - -foo. IN A 10.0.0.5 -; this is comment5 - -foo. IN A 10.0.0.6 - -foo. IN DNSKEY 256 3 5 AwEAAb+8l ; this is comment 6 -foo. IN NSEC miek.nl. TXT RRSIG NSEC; this is comment 7 -foo. IN TXT "THIS IS TEXT MAN"; this is comment 8 -` - for x := range ParseZone(strings.NewReader(zone), ".", "") { - if x.Error == nil { - if x.Comment != "" { - if _, ok := comments[x.Comment]; !ok { - t.Errorf("wrong comment %s", x.Comment) - } - } - } - } -} - -func TestEUIxx(t *testing.T) { - tests := map[string]string{ - "host.example. IN EUI48 00-00-5e-90-01-2a": "host.example.\t3600\tIN\tEUI48\t00-00-5e-90-01-2a", - "host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a", - } - for i, o := range tests { - r, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse %s: %v", i, err) - } - if r.String() != o { - t.Errorf("want %s, got %s", o, r.String()) - } - } -} - -func TestUserRR(t *testing.T) { - tests := map[string]string{ - "host.example. IN UID 1234": "host.example.\t3600\tIN\tUID\t1234", - "host.example. IN GID 1234556": "host.example.\t3600\tIN\tGID\t1234556", - "host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"", - } - for i, o := range tests { - r, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse %s: %v", i, err) - } - if r.String() != o { - t.Errorf("want %s, got %s", o, r.String()) - } - } -} - -func TestTXT(t *testing.T) { - // Test single entry TXT record - rr, err := NewRR(`_raop._tcp.local. 60 IN TXT "single value"`) - if err != nil { - t.Error("failed to parse single value TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 1 { - t.Error("bad size of TXT value:", len(rr.Txt)) - } else if rr.Txt[0] != "single value" { - t.Error("bad single value") - } - if rr.String() != `_raop._tcp.local. 60 IN TXT "single value"` { - t.Error("bad representation of TXT record:", rr.String()) - } - if rr.len() != 28+1+12 { - t.Error("bad size of serialized record:", rr.len()) - } - } - - // Test multi entries TXT record - rr, err = NewRR(`_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"`) - if err != nil { - t.Error("failed to parse multi-values TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 4 { - t.Error("bad size of TXT multi-value:", len(rr.Txt)) - } else if rr.Txt[0] != "a=1" || rr.Txt[1] != "b=2" || rr.Txt[2] != "c=3" || rr.Txt[3] != "d=4" { - t.Error("bad values in TXT records") - } - if rr.String() != `_raop._tcp.local. 
60 IN TXT "a=1" "b=2" "c=3" "d=4"` { - t.Error("bad representation of TXT multi value record:", rr.String()) - } - if rr.len() != 28+1+3+1+3+1+3+1+3 { - t.Error("bad size of serialized multi value record:", rr.len()) - } - } - - // Test empty-string in TXT record - rr, err = NewRR(`_raop._tcp.local. 60 IN TXT ""`) - if err != nil { - t.Error("failed to parse empty-string TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 1 { - t.Error("bad size of TXT empty-string value:", len(rr.Txt)) - } else if rr.Txt[0] != "" { - t.Error("bad value for empty-string TXT record") - } - if rr.String() != `_raop._tcp.local. 60 IN TXT ""` { - t.Error("bad representation of empty-string TXT record:", rr.String()) - } - if rr.len() != 28+1 { - t.Error("bad size of serialized record:", rr.len()) - } - } - - // Test TXT record with chunk larger than 255 bytes, they should be split up, by the parser - s := "" - for i := 0; i < 255; i++ { - s += "a" - } - s += "b" - rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`) - if err != nil { - t.Error("failed to parse empty-string TXT record", err) - } - if rr.(*TXT).Txt[1] != "b" { - t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1]) - } - t.Log(rr.String()) -} - -func TestTypeXXXX(t *testing.T) { - _, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd") - if err != nil { - t.Errorf("failed to parse TYPE1234 RR: %v", err) - } - _, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd") - if err == nil { - t.Errorf("this should not work, for TYPE655341") - } - _, err = NewRR("example.com IN TYPE1 \\# 4 0a000001") - if err == nil { - t.Errorf("this should not work") - } -} - -func TestPTR(t *testing.T) { - _, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.") - if err != nil { - t.Error("failed to parse ", err) - } -} - -func TestDigit(t *testing.T) { - tests := map[string]byte{ - "miek\\000.nl. 100 IN TXT \"A\"": 0, - "miek\\001.nl. 100 IN TXT \"A\"": 1, - "miek\\254.nl. 100 IN TXT \"A\"": 254, - "miek\\255.nl. 100 IN TXT \"A\"": 255, - "miek\\256.nl. 100 IN TXT \"A\"": 0, - "miek\\257.nl. 100 IN TXT \"A\"": 1, - "miek\\004.nl. 100 IN TXT \"A\"": 4, - } - for s, i := range tests { - r, err := NewRR(s) - buf := make([]byte, 40) - if err != nil { - t.Fatalf("failed to parse %v", err) - } - PackRR(r, buf, 0, nil, false) - t.Log(buf) - if buf[5] != i { - t.Fatalf("5 pos must be %d, is %d", i, buf[5]) - } - r1, _, _ := UnpackRR(buf, 0) - if r1.Header().Ttl != 100 { - t.Fatalf("TTL should %d, is %d", 100, r1.Header().Ttl) - } - } -} - -func TestParseRRSIGTimestamp(t *testing.T) { - tests := map[string]bool{ - `miek.nl. IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, - `miek.nl. IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. 
MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, - } - for r := range tests { - _, err := NewRR(r) - if err != nil { - t.Error(err) - } - } -} - -func TestTxtEqual(t *testing.T) { - rr1 := new(TXT) - rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - rr1.Txt = []string{"a\"a", "\"", "b"} - rr2, _ := NewRR(rr1.String()) - if rr1.String() != rr2.String() { - // This is not an error, but keep this test. - t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String()) - } - t.Logf("%s\n%s", rr1.String(), rr2.String()) -} - -func TestTxtLong(t *testing.T) { - rr1 := new(TXT) - rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - // Make a long txt record, this breaks when sending the packet, - // but not earlier. - rr1.Txt = []string{"start-"} - for i := 0; i < 200; i++ { - rr1.Txt[0] += "start-" - } - str := rr1.String() - if len(str) < len(rr1.Txt[0]) { - t.Error("string conversion should work") - } -} - -// Basically, don't crash. -func TestMalformedPackets(t *testing.T) { - var packets = []string{ - "0021641c0000000100000000000078787878787878787878787303636f6d0000100001", - } - - // com = 63 6f 6d - for _, packet := range packets { - data, _ := hex.DecodeString(packet) - // for _, v := range data { - // t.Log(v) - // } - var msg Msg - msg.Unpack(data) - // println(msg.String()) - } -} - -type algorithm struct { - name uint8 - bits int -} - -func TestNewPrivateKey(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - algorithms := []algorithm{ - {ECDSAP256SHA256, 256}, - {ECDSAP384SHA384, 384}, - {RSASHA1, 1024}, - {RSASHA256, 2048}, - {DSA, 1024}, - } - - for _, algo := range algorithms { - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." 
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = algo.name - privkey, err := key.Generate(algo.bits) - if err != nil { - t.Fatal(err) - } - - newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey)) - if err != nil { - t.Error(key.String()) - t.Error(key.PrivateKeyString(privkey)) - t.Fatal(err) - } - - switch newPrivKey := newPrivKey.(type) { - case *rsa.PrivateKey: - newPrivKey.Precompute() - } - - if !reflect.DeepEqual(privkey, newPrivKey) { - t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey) - } - } -} - -// special input test -func TestNewRRSpecial(t *testing.T) { - var ( - rr RR - err error - expect string - ) - - rr, err = NewRR("; comment") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("$ORIGIN foo.") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR(" ") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("\n") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2") - expect = "foo.\t3600\tIN\tA\t1.1.1.1" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr == nil || rr.String() != expect { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } -} - -func TestPrintfVerbsRdata(t *testing.T) { - x, _ := NewRR("www.miek.nl. IN MX 20 mx.miek.nl.") - if Field(x, 1) != "20" { - t.Errorf("should be 20") - } - if Field(x, 2) != "mx.miek.nl." { - t.Errorf("should be mx.miek.nl.") - } - - x, _ = NewRR("www.miek.nl. IN A 127.0.0.1") - if Field(x, 1) != "127.0.0.1" { - t.Errorf("should be 127.0.0.1") - } - - x, _ = NewRR("www.miek.nl. IN AAAA ::1") - if Field(x, 1) != "::1" { - t.Errorf("should be ::1") - } - - x, _ = NewRR("www.miek.nl. IN NSEC a.miek.nl. A NS SOA MX AAAA") - if Field(x, 1) != "a.miek.nl." { - t.Errorf("should be a.miek.nl.") - } - if Field(x, 2) != "A NS SOA MX AAAA" { - t.Errorf("should be A NS SOA MX AAAA") - } - - x, _ = NewRR("www.miek.nl. IN TXT \"first\" \"second\"") - if Field(x, 1) != "first second" { - t.Errorf("should be first second") - } - if Field(x, 0) != "" { - t.Errorf("should be empty") - } -} - -func TestParseTokenOverflow(t *testing.T) { - _, err := NewRR("_443._tcp.example.org. 
IN TLSA 0 0 0 308205e8308204d0a00302010202100411de8f53b462f6a5a861b712ec6b59300d06092a864886f70d01010b05003070310b300906035504061302555331153013060355040a130c446967694365727420496e6331193017060355040b13107777772e64696769636572742e636f6d312f302d06035504031326446967694365727420534841322048696768204173737572616e636520536572766572204341301e170d3134313130363030303030305a170d3135313131333132303030305a3081a5310b3009060355040613025553311330110603550408130a43616c69666f726e6961311430120603550407130b4c6f7320416e67656c6573313c303a060355040a1333496e7465726e657420436f72706f726174696f6e20666f722041737369676e6564204e616d657320616e64204e756d6265727331133011060355040b130a546563686e6f6c6f6779311830160603550403130f7777772e6578616d706c652e6f726730820122300d06092a864886f70d01010105000382010f003082010a02820101009e663f52a3d18cb67cdfed547408a4e47e4036538988da2798da3b6655f7240d693ed1cb3fe6d6ad3a9e657ff6efa86b83b0cad24e5d31ff2bf70ec3b78b213f1b4bf61bdc669cbbc07d67154128ca92a9b3cbb4213a836fb823ddd4d7cc04918314d25f06086fa9970ba17e357cca9b458c27eb71760ab95e3f9bc898ae89050ae4d09ba2f7e4259d9ff1e072a6971b18355a8b9e53670c3d5dbdbd283f93a764e71b3a4140ca0746090c08510e2e21078d7d07844bf9c03865b531a0bf2ee766bc401f6451c5a1e6f6fb5d5c1d6a97a0abe91ae8b02e89241e07353909ccd5b41c46de207c06801e08f20713603827f2ae3e68cf15ef881d7e0608f70742e30203010001a382024630820242301f0603551d230418301680145168ff90af0207753cccd9656462a212b859723b301d0603551d0e04160414b000a7f422e9b1ce216117c4c46e7164c8e60c553081810603551d11047a3078820f7777772e6578616d706c652e6f7267820b6578616d706c652e636f6d820b6578616d706c652e656475820b6578616d706c652e6e6574820b6578616d706c652e6f7267820f7777772e6578616d706c652e636f6d820f7777772e6578616d706c652e656475820f7777772e6578616d706c652e6e6574300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b0601050507030230750603551d1f046e306c3034a032a030862e687474703a2f2f63726c332e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c3034a032a030862e687474703a2f2f63726c342e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c30420603551d20043b3039303706096086480186fd6c0101302a302806082b06010505070201161c68747470733a2f2f7777772e64696769636572742e636f6d2f43505330818306082b0601050507010104773075302406082b060105050730018618687474703a2f2f6f6373702e64696769636572742e636f6d304d06082b060105050730028641687474703a2f2f636163657274732e64696769636572742e636f6d2f446967694365727453484132486967684173737572616e636553657276657243412e637274300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101005eac2124dedb3978a86ff3608406acb542d3cb54cb83facd63aec88144d6a1bf15dbf1f215c4a73e241e582365cba9ea50dd306541653b3513af1a0756c1b2720e8d112b34fb67181efad9c4609bdc670fb025fa6e6d42188161b026cf3089a08369c2f3609fc84bcc3479140c1922ede430ca8dbac2b2a3cdacb305ba15dc7361c4c3a5e6daa99cb446cb221b28078a7a944efba70d96f31ac143d959bccd2fd50e30c325ea2624fb6b6dbe9344dbcf133bfbd5b4e892d635dbf31596451672c6b65ba5ac9b3cddea92b35dab1065cae3c8cb6bb450a62ea2f72ea7c6bdc7b65fa09b012392543734083c7687d243f8d0375304d99ccd2e148966a8637a6797") - if err == nil { - t.Fatalf("token overflow should return an error") - } - t.Logf("err: %s\n", err) -} - -func TestParseTLSA(t *testing.T) { - lt := []string{ - "_443._tcp.example.org.\t3600\tIN\tTLSA\t1 1 1 c22be239f483c08957bc106219cc2d3ac1a308dfbbdd0a365f17b9351234cf00", - "_443._tcp.example.org.\t3600\tIN\tTLSA\t2 1 2 4e85f45179e9cd6e0e68e2eb5be2e85ec9b92d91c609caf3ef0315213e3f92ece92c38397a607214de95c7fadc0ad0f1c604a469a0387959745032c0d51492f3", - "_443._tcp.example.org.\t3600\tIN\tTLSA\t3 0 2 
69ec8d2277360b215d0cd956b0e2747108dff34b27d461a41c800629e38ee6c2d1230cc9e8e36711330adc6766e6ff7c5fbb37f106f248337c1a20ad682888d2", - } - for _, o := range lt { - rr, err := NewRR(o) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseSMIMEA(t *testing.T) { - lt := map[string]string{ - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070", - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710", - "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", - "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8 c26b251fa0c887ba4869f01 1a65f7e79967c2eb729f5b": "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseSSHFP(t *testing.T) { - lt := []string{ - "test.example.org.\t300\tSSHFP\t1 2 (\n" + - "\t\t\t\t\tBC6533CDC95A79078A39A56EA7635984ED655318ADA9\n" + - "\t\t\t\t\tB6159E30723665DA95BB )", - "test.example.org.\t300\tSSHFP\t1 2 ( BC6533CDC 95A79078A39A56EA7635984ED655318AD A9B6159E3072366 5DA95BB )", - } - result := "test.example.org.\t300\tIN\tSSHFP\t1 2 BC6533CDC95A79078A39A56EA7635984ED655318ADA9B6159E30723665DA95BB" - for _, o := range lt { - rr, err := NewRR(o) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != result { - t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseHINFO(t *testing.T) { - dt := map[string]string{ - "example.net. HINFO A B": "example.net. 3600 IN HINFO \"A\" \"B\"", - "example.net. HINFO \"A\" \"B\"": "example.net. 3600 IN HINFO \"A\" \"B\"", - "example.net. HINFO A B C D E F": "example.net. 3600 IN HINFO \"A\" \"B C D E F\"", - "example.net. HINFO AB": "example.net. 3600 IN HINFO \"AB\" \"\"", - // "example.net. HINFO PC-Intel-700mhz \"Redhat Linux 7.1\"": "example.net. 
3600 IN HINFO \"PC-Intel-700mhz\" \"Redhat Linux 7.1\"", - // This one is recommended in Pro Bind book http://www.zytrax.com/books/dns/ch8/hinfo.html - // but effectively, even Bind would replace it to correctly formed text when you AXFR - // TODO: remove this set of comments or figure support for quoted/unquoted combinations in endingToTxtSlice function - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseCAA(t *testing.T) { - lt := map[string]string{ - "example.net. CAA 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", - "example.net. CAA 0 issuewild \"symantec.com; stuff\"": "example.net.\t3600\tIN\tCAA\t0 issuewild \"symantec.com; stuff\"", - "example.net. CAA 128 tbs \"critical\"": "example.net.\t3600\tIN\tCAA\t128 tbs \"critical\"", - "example.net. CAA 2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"": "example.net.\t3600\tIN\tCAA\t2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"", - "example.net. TYPE257 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestPackCAA(t *testing.T) { - m := new(Msg) - record := new(CAA) - record.Hdr = RR_Header{Name: "example.com.", Rrtype: TypeCAA, Class: ClassINET, Ttl: 0} - record.Tag = "issue" - record.Value = "symantec.com" - record.Flag = 1 - - m.Answer = append(m.Answer, record) - bytes, err := m.Pack() - if err != nil { - t.Fatalf("failed to pack msg: %v", err) - } - if err := m.Unpack(bytes); err != nil { - t.Fatalf("failed to unpack msg: %v", err) - } - if len(m.Answer) != 1 { - t.Fatalf("incorrect number of answers unpacked") - } - rr := m.Answer[0].(*CAA) - if rr.Tag != "issue" { - t.Fatalf("invalid tag for unpacked answer") - } else if rr.Value != "symantec.com" { - t.Fatalf("invalid value for unpacked answer") - } else if rr.Flag != 1 { - t.Fatalf("invalid flag for unpacked answer") - } -} - -func TestParseURI(t *testing.T) { - lt := map[string]string{ - "_http._tcp. IN URI 10 1 \"http://www.example.com/path\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"http://www.example.com/path\"", - "_http._tcp. IN URI 10 1 \"\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"\"", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseAVC(t *testing.T) { - avcs := map[string]string{ - `example.org. IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`: `example.org. 
3600 IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`, - } - for avc, o := range avcs { - rr, err := NewRR(avc) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", avc, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestUnbalancedParens(t *testing.T) { - sig := `example.com. 3600 IN RRSIG MX 15 2 3600 ( - 1440021600 1438207200 3613 example.com. ( - oL9krJun7xfBOIWcGHi7mag5/hdZrKWw15jPGrHpjQeRAvTdszaPD+QLs3f - x8A4M3e23mRZ9VrbpMngwcrqNAg== )` - _, err := NewRR(sig) - if err == nil { - t.Fatalf("Failed to detect extra opening brace") - } -} diff --git a/vendor/github.com/miekg/dns/privaterr_test.go b/vendor/github.com/miekg/dns/privaterr_test.go deleted file mode 100644 index 72ec8f5c..00000000 --- a/vendor/github.com/miekg/dns/privaterr_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package dns_test - -import ( - "strings" - "testing" - - "github.com/miekg/dns" -) - -const TypeISBN uint16 = 0xFF00 - -// A crazy new RR type :) -type ISBN struct { - x string // rdata with 10 or 13 numbers, dashes or spaces allowed -} - -func NewISBN() dns.PrivateRdata { return &ISBN{""} } - -func (rd *ISBN) Len() int { return len([]byte(rd.x)) } -func (rd *ISBN) String() string { return rd.x } - -func (rd *ISBN) Parse(txt []string) error { - rd.x = strings.TrimSpace(strings.Join(txt, " ")) - return nil -} - -func (rd *ISBN) Pack(buf []byte) (int, error) { - b := []byte(rd.x) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *ISBN) Unpack(buf []byte) (int, error) { - rd.x = string(buf) - return len(buf), nil -} - -func (rd *ISBN) Copy(dest dns.PrivateRdata) error { - isbn, ok := dest.(*ISBN) - if !ok { - return dns.ErrRdata - } - isbn.x = rd.x - return nil -} - -var testrecord = strings.Join([]string{"example.org.", "3600", "IN", "ISBN", "12-3 456789-0-123"}, "\t") - -func TestPrivateText(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, NewISBN) - defer dns.PrivateHandleRemove(TypeISBN) - - rr, err := dns.NewRR(testrecord) - if err != nil { - t.Fatal(err) - } - if rr.String() != testrecord { - t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord) - } else { - t.Log(rr.String()) - } -} - -func TestPrivateByteSlice(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, NewISBN) - defer dns.PrivateHandleRemove(TypeISBN) - - rr, err := dns.NewRR(testrecord) - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 100) - off, err := dns.PackRR(rr, buf, 0, nil, false) - if err != nil { - t.Errorf("got error packing ISBN: %v", err) - } - - custrr := rr.(*dns.PrivateRR) - if ln := custrr.Data.Len() + len(custrr.Header().Name) + 11; ln != off { - t.Errorf("offset is not matching to length of Private RR: %d!=%d", off, ln) - } - - rr1, off1, err := dns.UnpackRR(buf[:off], 0) - if err != nil { - t.Errorf("got error unpacking ISBN: %v", err) - return - } - - if off1 != off { - t.Errorf("offset after unpacking differs: %d != %d", off1, off) - } - - if rr1.String() != testrecord { - t.Errorf("record string representation did not match original %#v != %#v", rr1.String(), testrecord) - } else { - t.Log(rr1.String()) - } -} - -const TypeVERSION uint16 = 0xFF01 - -type VERSION struct { - x string -} - -func NewVersion() dns.PrivateRdata { return &VERSION{""} } - -func (rd *VERSION) String() string { return rd.x } -func (rd *VERSION) Parse(txt []string) error { - rd.x = 
strings.TrimSpace(strings.Join(txt, " ")) - return nil -} - -func (rd *VERSION) Pack(buf []byte) (int, error) { - b := []byte(rd.x) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *VERSION) Unpack(buf []byte) (int, error) { - rd.x = string(buf) - return len(buf), nil -} - -func (rd *VERSION) Copy(dest dns.PrivateRdata) error { - isbn, ok := dest.(*VERSION) - if !ok { - return dns.ErrRdata - } - isbn.x = rd.x - return nil -} - -func (rd *VERSION) Len() int { - return len([]byte(rd.x)) -} - -var smallzone = `$ORIGIN example.org. -@ SOA sns.dns.icann.org. noc.dns.icann.org. ( - 2014091518 7200 3600 1209600 3600 -) - A 1.2.3.4 -ok ISBN 1231-92110-12 -go VERSION ( - 1.3.1 ; comment -) -www ISBN 1231-92110-16 -* CNAME @ -` - -func TestPrivateZoneParser(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, NewISBN) - dns.PrivateHandle("VERSION", TypeVERSION, NewVersion) - defer dns.PrivateHandleRemove(TypeISBN) - defer dns.PrivateHandleRemove(TypeVERSION) - - r := strings.NewReader(smallzone) - for x := range dns.ParseZone(r, ".", "") { - if err := x.Error; err != nil { - t.Fatal(err) - } - t.Log(x.RR) - } -} diff --git a/vendor/github.com/miekg/dns/remote_test.go b/vendor/github.com/miekg/dns/remote_test.go deleted file mode 100644 index 4cf701fe..00000000 --- a/vendor/github.com/miekg/dns/remote_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package dns - -import "testing" - -const LinodeAddr = "176.58.119.54:53" - -func TestClientRemote(t *testing.T) { - m := new(Msg) - m.SetQuestion("go.dns.miek.nl.", TypeTXT) - - c := new(Client) - r, _, err := c.Exchange(m, LinodeAddr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get an valid answer\n%v", r) - } -} diff --git a/vendor/github.com/miekg/dns/sanitize_test.go b/vendor/github.com/miekg/dns/sanitize_test.go deleted file mode 100644 index 2ba3fe9a..00000000 --- a/vendor/github.com/miekg/dns/sanitize_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package dns - -import "testing" - -func TestDedup(t *testing.T) { - // make it []string - testcases := map[[3]RR][]string{ - [...]RR{ - newRR(t, "mIek.nl. IN A 127.0.0.1"), - newRR(t, "mieK.nl. IN A 127.0.0.1"), - newRR(t, "miek.Nl. IN A 127.0.0.1"), - }: {"mIek.nl.\t3600\tIN\tA\t127.0.0.1"}, - [...]RR{ - newRR(t, "miEk.nl. 2000 IN A 127.0.0.1"), - newRR(t, "mieK.Nl. 1000 IN A 127.0.0.1"), - newRR(t, "Miek.nL. 500 IN A 127.0.0.1"), - }: {"miEk.nl.\t500\tIN\tA\t127.0.0.1"}, - [...]RR{ - newRR(t, "miek.nl. IN A 127.0.0.1"), - newRR(t, "miek.nl. CH A 127.0.0.1"), - newRR(t, "miek.nl. IN A 127.0.0.1"), - }: {"miek.nl.\t3600\tIN\tA\t127.0.0.1", - "miek.nl.\t3600\tCH\tA\t127.0.0.1", - }, - [...]RR{ - newRR(t, "miek.nl. CH A 127.0.0.1"), - newRR(t, "miek.nl. IN A 127.0.0.1"), - newRR(t, "miek.de. IN A 127.0.0.1"), - }: {"miek.nl.\t3600\tCH\tA\t127.0.0.1", - "miek.nl.\t3600\tIN\tA\t127.0.0.1", - "miek.de.\t3600\tIN\tA\t127.0.0.1", - }, - [...]RR{ - newRR(t, "miek.de. IN A 127.0.0.1"), - newRR(t, "miek.nl. 200 IN A 127.0.0.1"), - newRR(t, "miek.nl. 300 IN A 127.0.0.1"), - }: {"miek.de.\t3600\tIN\tA\t127.0.0.1", - "miek.nl.\t200\tIN\tA\t127.0.0.1", - }, - } - - for rr, expected := range testcases { - out := Dedup([]RR{rr[0], rr[1], rr[2]}, nil) - for i, o := range out { - if o.String() != expected[i] { - t.Fatalf("expected %v, got %v", expected[i], o.String()) - } - } - } -} - -func BenchmarkDedup(b *testing.B) { - rrs := []RR{ - newRR(nil, "miEk.nl. 2000 IN A 127.0.0.1"), - newRR(nil, "mieK.Nl. 
1000 IN A 127.0.0.1"), - newRR(nil, "Miek.nL. 500 IN A 127.0.0.1"), - } - m := make(map[string]RR) - for i := 0; i < b.N; i++ { - Dedup(rrs, m) - } -} - -func TestNormalizedString(t *testing.T) { - tests := map[RR]string{ - newRR(t, "mIEk.Nl. 3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1", - newRR(t, "m\\ iek.nL. 3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1", - newRR(t, "m\\\tIeK.nl. 3600 in A 127.0.0.1"): "m\\009iek.nl.\tIN\tA\t127.0.0.1", - } - for tc, expected := range tests { - n := normalizedString(tc) - if n != expected { - t.Errorf("expected %s, got %s", expected, n) - } - } -} - -func newRR(t *testing.T, s string) RR { - r, err := NewRR(s) - if err != nil { - t.Logf("newRR: %v", err) - } - return r -} diff --git a/vendor/github.com/miekg/dns/scan_test.go b/vendor/github.com/miekg/dns/scan_test.go deleted file mode 100644 index e43ad447..00000000 --- a/vendor/github.com/miekg/dns/scan_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package dns - -import ( - "io/ioutil" - "os" - "strings" - "testing" -) - -func TestParseZoneInclude(t *testing.T) { - - tmpfile, err := ioutil.TempFile("", "dns") - if err != nil { - t.Fatalf("could not create tmpfile for test: %s", err) - } - - if _, err := tmpfile.WriteString("foo\tIN\tA\t127.0.0.1"); err != nil { - t.Fatalf("unable to write content to tmpfile %q: %s", tmpfile.Name(), err) - } - if err := tmpfile.Close(); err != nil { - t.Fatalf("could not close tmpfile %q: %s", tmpfile.Name(), err) - } - - zone := "$ORIGIN example.org.\n$INCLUDE " + tmpfile.Name() - - tok := ParseZone(strings.NewReader(zone), "", "") - for x := range tok { - if x.Error != nil { - t.Fatalf("expected no error, but got %s", x.Error) - } - if x.RR.Header().Name != "foo.example.org." { - t.Fatalf("expected %s, but got %s", "foo.example.org.", x.RR.Header().Name) - } - } - - os.Remove(tmpfile.Name()) - - tok = ParseZone(strings.NewReader(zone), "", "") - for x := range tok { - if x.Error == nil { - t.Fatalf("expected first token to contain an error but it didn't") - } - if !strings.Contains(x.Error.Error(), "failed to open") || - !strings.Contains(x.Error.Error(), tmpfile.Name()) { - t.Fatalf(`expected error to contain: "failed to open" and %q but got: %s`, tmpfile.Name(), x.Error) - } - } -} diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go deleted file mode 100644 index f17a2f90..00000000 --- a/vendor/github.com/miekg/dns/server_test.go +++ /dev/null @@ -1,719 +0,0 @@ -package dns - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "runtime" - "sync" - "testing" - "time" -) - -func HelloServer(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - w.WriteMsg(m) -} - -func HelloServerBadID(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Id++ - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - w.WriteMsg(m) -} - -func AnotherHelloServer(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}} - w.WriteMsg(m) -} - -func RunLocalUDPServer(laddr string) (*Server, string, error) { - server, l, _, err := RunLocalUDPServerWithFinChan(laddr) - - return 
server, l, err -} - -func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan struct{}, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", nil, err - } - server := &Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - fin := make(chan struct{}, 0) - - go func() { - server.ActivateAndServe() - close(fin) - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), fin, nil -} - -func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", err - } - server := &Server{PacketConn: pc, Unsafe: true, - ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), nil -} - -func RunLocalTCPServer(laddr string) (*Server, string, error) { - l, err := net.Listen("tcp", laddr) - if err != nil { - return nil, "", err - } - - server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - l.Close() - }() - - waitLock.Lock() - return server, l.Addr().String(), nil -} - -func RunLocalTLSServer(laddr string, config *tls.Config) (*Server, string, error) { - l, err := tls.Listen("tcp", laddr, config) - if err != nil { - return nil, "", err - } - - server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - l.Close() - }() - - waitLock.Lock() - return server, l.Addr().String(), nil -} - -func TestServing(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - HandleFunc("example.com.", AnotherHelloServer) - defer HandleRemove("miek.nl.") - defer HandleRemove("example.com.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - r, _, err := c.Exchange(m, addrstr) - if err != nil || len(r.Extra) == 0 { - t.Fatal("failed to exchange miek.nl", err) - } - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello world" { - t.Error("unexpected result for miek.nl", txt, "!= Hello world") - } - - m.SetQuestion("example.com.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange example.com", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } - - // Test Mixes cased as noticed by Ask. 
- m.SetQuestion("eXaMplE.cOm.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Error("failed to exchange eXaMplE.cOm", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } -} - -func TestServingTLS(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - HandleFunc("example.com.", AnotherHelloServer) - defer HandleRemove("miek.nl.") - defer HandleRemove("example.com.") - - cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) - if err != nil { - t.Fatalf("unable to build certificate: %v", err) - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - c.Net = "tcp-tls" - c.TLSConfig = &tls.Config{ - InsecureSkipVerify: true, - } - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - r, _, err := c.Exchange(m, addrstr) - if err != nil || len(r.Extra) == 0 { - t.Fatal("failed to exchange miek.nl", err) - } - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello world" { - t.Error("unexpected result for miek.nl", txt, "!= Hello world") - } - - m.SetQuestion("example.com.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange example.com", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } - - // Test Mixes cased as noticed by Ask. - m.SetQuestion("eXaMplE.cOm.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Error("failed to exchange eXaMplE.cOm", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("unexpected result for example.com", txt, "!= Hello example") - } -} - -func BenchmarkServe(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - b.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func benchmarkServe6(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer("[::1]:0") - if err != nil { - b.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func HelloServerCompress(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - m.Compress = true - w.WriteMsg(m) -} - -func BenchmarkServeCompress(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServerCompress) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - b.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - b.StartTimer() - for i := 0; 
i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func TestDotAsCatchAllWildcard(t *testing.T) { - mux := NewServeMux() - mux.Handle(".", HandlerFunc(HelloServer)) - mux.Handle("example.com.", HandlerFunc(AnotherHelloServer)) - - handler := mux.match("www.miek.nl.", TypeTXT) - if handler == nil { - t.Error("wildcard match failed") - } - - handler = mux.match("www.example.com.", TypeTXT) - if handler == nil { - t.Error("example.com match failed") - } - - handler = mux.match("a.www.example.com.", TypeTXT) - if handler == nil { - t.Error("a.www.example.com match failed") - } - - handler = mux.match("boe.", TypeTXT) - if handler == nil { - t.Error("boe. match failed") - } -} - -func TestCaseFolding(t *testing.T) { - mux := NewServeMux() - mux.Handle("_udp.example.com.", HandlerFunc(HelloServer)) - - handler := mux.match("_dns._udp.example.com.", TypeSRV) - if handler == nil { - t.Error("case sensitive characters folded") - } - - handler = mux.match("_DNS._UDP.EXAMPLE.COM.", TypeSRV) - if handler == nil { - t.Error("case insensitive characters not folded") - } -} - -func TestRootServer(t *testing.T) { - mux := NewServeMux() - mux.Handle(".", HandlerFunc(HelloServer)) - - handler := mux.match(".", TypeNS) - if handler == nil { - t.Error("root match failed") - } -} - -type maxRec struct { - max int - sync.RWMutex -} - -var M = new(maxRec) - -func HelloServerLargeResponse(resp ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Authoritative = true - m1 := 0 - M.RLock() - m1 = M.max - M.RUnlock() - for i := 0; i < m1; i++ { - aRec := &A{ - Hdr: RR_Header{ - Name: req.Question[0].Name, - Rrtype: TypeA, - Class: ClassINET, - Ttl: 0, - }, - A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i+1)).To4(), - } - m.Answer = append(m.Answer, aRec) - } - resp.WriteMsg(m) -} - -func TestServingLargeResponses(t *testing.T) { - HandleFunc("example.", HelloServerLargeResponse) - defer HandleRemove("example.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - // Create request - m := new(Msg) - m.SetQuestion("web.service.example.", TypeANY) - - c := new(Client) - c.Net = "udp" - M.Lock() - M.max = 2 - M.Unlock() - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - // This must fail - M.Lock() - M.max = 20 - M.Unlock() - _, _, err = c.Exchange(m, addrstr) - if err == nil { - t.Error("failed to fail exchange, this should generate packet error") - } - // But this must work again - c.UDPSize = 7000 - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } -} - -func TestServingResponse(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - HandleFunc("miek.nl.", HelloServer) - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - m.Response = false - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange", err) - } - m.Response = true - _, _, err = c.Exchange(m, addrstr) - if err == nil { - t.Fatal("exchanged response message") - } - - s.Shutdown() - s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - defer s.Shutdown() - - m.Response = true - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("could 
exchanged response message in Unsafe mode") - } -} - -func TestShutdownTCP(t *testing.T) { - s, _, err := RunLocalTCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - err = s.Shutdown() - if err != nil { - t.Errorf("could not shutdown test TCP server, %v", err) - } -} - -func TestShutdownTLS(t *testing.T) { - cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) - if err != nil { - t.Fatalf("unable to build certificate: %v", err) - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - s, _, err := RunLocalTLSServer("127.0.0.1:0", &config) - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - err = s.Shutdown() - if err != nil { - t.Errorf("could not shutdown test TLS server, %v", err) - } -} - -type trigger struct { - done bool - sync.RWMutex -} - -func (t *trigger) Set() { - t.Lock() - defer t.Unlock() - t.done = true -} -func (t *trigger) Get() bool { - t.RLock() - defer t.RUnlock() - return t.done -} - -func TestHandlerCloseTCP(t *testing.T) { - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(err) - } - addr := ln.Addr().String() - - server := &Server{Addr: addr, Net: "tcp", Listener: ln} - - hname := "testhandlerclosetcp." - triggered := &trigger{} - HandleFunc(hname, func(w ResponseWriter, r *Msg) { - triggered.Set() - w.Close() - }) - defer HandleRemove(hname) - - go func() { - defer server.Shutdown() - c := &Client{Net: "tcp"} - m := new(Msg).SetQuestion(hname, 1) - tries := 0 - exchange: - _, _, err := c.Exchange(m, addr) - if err != nil && err != io.EOF { - t.Logf("exchange failed: %s\n", err) - if tries == 3 { - return - } - time.Sleep(time.Second / 10) - tries++ - goto exchange - } - }() - server.ActivateAndServe() - if !triggered.Get() { - t.Fatalf("handler never called") - } -} - -func TestShutdownUDP(t *testing.T) { - s, _, fin, err := RunLocalUDPServerWithFinChan("127.0.0.1:0") - if err != nil { - t.Fatalf("unable to run test server: %v", err) - } - err = s.Shutdown() - if err != nil { - t.Errorf("could not shutdown test UDP server, %v", err) - } - select { - case <-fin: - case <-time.After(2 * time.Second): - t.Error("Could not shutdown test UDP server. 
Gave up waiting") - } -} - -type ExampleFrameLengthWriter struct { - Writer -} - -func (e *ExampleFrameLengthWriter) Write(m []byte) (int, error) { - fmt.Println("writing raw DNS message of length", len(m)) - return e.Writer.Write(m) -} - -func ExampleDecorateWriter() { - // instrument raw DNS message writing - wf := DecorateWriter(func(w Writer) Writer { - return &ExampleFrameLengthWriter{w} - }) - - // simple UDP server - pc, err := net.ListenPacket("udp", "127.0.0.1:0") - if err != nil { - fmt.Println(err.Error()) - return - } - server := &Server{ - PacketConn: pc, - DecorateWriter: wf, - ReadTimeout: time.Hour, WriteTimeout: time.Hour, - } - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - defer server.Shutdown() - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - - HandleFunc("miek.nl.", HelloServer) - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - _, _, err = c.Exchange(m, pc.LocalAddr().String()) - if err != nil { - fmt.Println("failed to exchange", err.Error()) - return - } - // Output: writing raw DNS message of length 56 -} - -var ( - // CertPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) - CertPEMBlock = []byte(`-----BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIRAJFYMkcn+b8dpU15wjf++GgwDQYJKoZIhvcNAQELBQAw -EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0xNjAxMDgxMjAzNTNaFw0xNzAxMDcxMjAz -NTNaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQDXjqO6skvP03k58CNjQggd9G/mt+Wa+xRU+WXiKCCHttawM8x+slq5 -yfsHCwxlwsGn79HmJqecNqgHb2GWBXAvVVokFDTcC1hUP4+gp2gu9Ny27UHTjlLm -O0l/xZ5MN8tfKyYlFw18tXu3fkaPyHj8v/D1RDkuo4ARdFvGSe8TqisbhLk2+9ow -xfIGbEM9Fdiw8qByC2+d+FfvzIKz3GfQVwn0VoRom8L6NBIANq1IGrB5JefZB6nv -DnfuxkBmY7F1513HKuEJ8KsLWWZWV9OPU4j4I4Rt+WJNlKjbD2srHxyrS2RDsr91 -8nCkNoWVNO3sZq0XkWKecdc921vL4ginAgMBAAGjVDBSMA4GA1UdDwEB/wQEAwIC -pDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBoGA1UdEQQT -MBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAGcU3iyLBIVZj -aDzSvEDHUd1bnLBl1C58Xu/CyKlPqVU7mLfK0JcgEaYQTSX6fCJVNLbbCrcGLsPJ -fbjlBbyeLjTV413fxPVuona62pBFjqdtbli2Qe8FRH2KBdm41JUJGdo+SdsFu7nc -BFOcubdw6LLIXvsTvwndKcHWx1rMX709QU1Vn1GAIsbJV/DWI231Jyyb+lxAUx/C -8vce5uVxiKcGS+g6OjsN3D3TtiEQGSXLh013W6Wsih8td8yMCMZ3w8LQ38br1GUe -ahLIgUJ9l6HDguM17R7kGqxNvbElsMUHfTtXXP7UDQUiYXDakg8xDP6n9DCDhJ8Y -bSt7OLB7NQ== ------END CERTIFICATE-----`) - - // KeyPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) - KeyPEMBlock = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA146jurJLz9N5OfAjY0IIHfRv5rflmvsUVPll4iggh7bWsDPM -frJaucn7BwsMZcLBp+/R5iannDaoB29hlgVwL1VaJBQ03AtYVD+PoKdoLvTctu1B -045S5jtJf8WeTDfLXysmJRcNfLV7t35Gj8h4/L/w9UQ5LqOAEXRbxknvE6orG4S5 -NvvaMMXyBmxDPRXYsPKgcgtvnfhX78yCs9xn0FcJ9FaEaJvC+jQSADatSBqweSXn -2Qep7w537sZAZmOxdeddxyrhCfCrC1lmVlfTj1OI+COEbfliTZSo2w9rKx8cq0tk -Q7K/dfJwpDaFlTTt7GatF5FinnHXPdtby+IIpwIDAQABAoIBAAJK4RDmPooqTJrC -JA41MJLo+5uvjwCT9QZmVKAQHzByUFw1YNJkITTiognUI0CdzqNzmH7jIFs39ZeG -proKusO2G6xQjrNcZ4cV2fgyb5g4QHStl0qhs94A+WojduiGm2IaumAgm6Mc5wDv -ld6HmknN3Mku/ZCyanVFEIjOVn2WB7ZQLTBs6ZYaebTJG2Xv6p9t2YJW7pPQ9Xce -s9ohAWohyM4X/OvfnfnLtQp2YLw/BxwehBsCR5SXM3ibTKpFNtxJC8hIfTuWtxZu -2ywrmXShYBRB1WgtZt5k04bY/HFncvvcHK3YfI1+w4URKtwdaQgPUQRbVwDwuyBn -flfkCJECgYEA/eWt01iEyE/lXkGn6V9lCocUU7lCU6yk5UT8VXVUc5If4KZKPfCk -p4zJDOqwn2eM673aWz/mG9mtvAvmnugaGjcaVCyXOp/D/GDmKSoYcvW5B/yjfkLy -dK6Yaa5LDRVYlYgyzcdCT5/9Qc626NzFwKCZNI4ncIU8g7ViATRxWJ8CgYEA2Ver -vZ0M606sfgC0H3NtwNBxmuJ+lIF5LNp/wDi07lDfxRR1rnZMX5dnxjcpDr/zvm8J 
-WtJJX3xMgqjtHuWKL3yKKony9J5ZPjichSbSbhrzfovgYIRZLxLLDy4MP9L3+CX/ -yBXnqMWuSnFX+M5fVGxdDWiYF3V+wmeOv9JvavkCgYEAiXAPDFzaY+R78O3xiu7M -r0o3wqqCMPE/wav6O/hrYrQy9VSO08C0IM6g9pEEUwWmzuXSkZqhYWoQFb8Lc/GI -T7CMXAxXQLDDUpbRgG79FR3Wr3AewHZU8LyiXHKwxcBMV4WGmsXGK3wbh8fyU1NO -6NsGk+BvkQVOoK1LBAPzZ1kCgYEAsBSmD8U33T9s4dxiEYTrqyV0lH3g/SFz8ZHH -pAyNEPI2iC1ONhyjPWKlcWHpAokiyOqeUpVBWnmSZtzC1qAydsxYB6ShT+sl9BHb -RMix/QAauzBJhQhUVJ3OIys0Q1UBDmqCsjCE8SfOT4NKOUnA093C+YT+iyrmmktZ -zDCJkckCgYEAndqM5KXGk5xYo+MAA1paZcbTUXwaWwjLU+XSRSSoyBEi5xMtfvUb -7+a1OMhLwWbuz+pl64wFKrbSUyimMOYQpjVE/1vk/kb99pxbgol27hdKyTH1d+ov -kFsxKCqxAnBVGEWAvVZAiiTOxleQFjz5RnL0BQp9Lg2cQe+dvuUmIAA= ------END RSA PRIVATE KEY-----`) -) - -func testShutdownBindPort(t *testing.T, protocol string, port string) { - handler := NewServeMux() - handler.HandleFunc(".", func(w ResponseWriter, r *Msg) {}) - startedCh := make(chan struct{}) - s := &Server{ - Addr: net.JoinHostPort("127.0.0.1", port), - Net: protocol, - Handler: handler, - NotifyStartedFunc: func() { - startedCh <- struct{}{} - }, - } - go func() { - if err := s.ListenAndServe(); err != nil { - t.Log(err) - } - }() - <-startedCh - t.Logf("DNS server is started on: %s", s.Addr) - if err := s.Shutdown(); err != nil { - t.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - go func() { - if err := s.ListenAndServe(); err != nil { - t.Fatal(err) - } - }() - <-startedCh - t.Logf("DNS server is started on: %s", s.Addr) -} - -func TestShutdownBindPortUDP(t *testing.T) { - testShutdownBindPort(t, "udp", "1153") -} - -func TestShutdownBindPortTCP(t *testing.T) { - testShutdownBindPort(t, "tcp", "1154") -} diff --git a/vendor/github.com/miekg/dns/sig0_test.go b/vendor/github.com/miekg/dns/sig0_test.go deleted file mode 100644 index 122de6a8..00000000 --- a/vendor/github.com/miekg/dns/sig0_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package dns - -import ( - "crypto" - "testing" - "time" -) - -func TestSIG0(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - m := new(Msg) - m.SetQuestion("example.org.", TypeSOA) - for _, alg := range []uint8{ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} { - algstr := AlgorithmToString[alg] - keyrr := new(KEY) - keyrr.Hdr.Name = algstr + "." - keyrr.Hdr.Rrtype = TypeKEY - keyrr.Hdr.Class = ClassINET - keyrr.Algorithm = alg - keysize := 1024 - switch alg { - case ECDSAP256SHA256: - keysize = 256 - case ECDSAP384SHA384: - keysize = 384 - } - pk, err := keyrr.Generate(keysize) - if err != nil { - t.Errorf("failed to generate key for “%s”: %v", algstr, err) - continue - } - now := uint32(time.Now().Unix()) - sigrr := new(SIG) - sigrr.Hdr.Name = "." 
- sigrr.Hdr.Rrtype = TypeSIG - sigrr.Hdr.Class = ClassANY - sigrr.Algorithm = alg - sigrr.Expiration = now + 300 - sigrr.Inception = now - 300 - sigrr.KeyTag = keyrr.KeyTag() - sigrr.SignerName = keyrr.Hdr.Name - mb, err := sigrr.Sign(pk.(crypto.Signer), m) - if err != nil { - t.Errorf("failed to sign message using “%s”: %v", algstr, err) - continue - } - m := new(Msg) - if err := m.Unpack(mb); err != nil { - t.Errorf("failed to unpack message signed using “%s”: %v", algstr, err) - continue - } - if len(m.Extra) != 1 { - t.Errorf("missing SIG for message signed using “%s”", algstr) - continue - } - var sigrrwire *SIG - switch rr := m.Extra[0].(type) { - case *SIG: - sigrrwire = rr - default: - t.Errorf("expected SIG RR, instead: %v", rr) - continue - } - for _, rr := range []*SIG{sigrr, sigrrwire} { - id := "sigrr" - if rr == sigrrwire { - id = "sigrrwire" - } - if err := rr.Verify(keyrr, mb); err != nil { - t.Errorf("failed to verify “%s” signed SIG(%s): %v", algstr, id, err) - continue - } - } - mb[13]++ - if err := sigrr.Verify(keyrr, mb); err == nil { - t.Errorf("verify succeeded on an altered message using “%s”", algstr) - continue - } - sigrr.Expiration = 2 - sigrr.Inception = 1 - mb, _ = sigrr.Sign(pk.(crypto.Signer), m) - if err := sigrr.Verify(keyrr, mb); err == nil { - t.Errorf("verify succeeded on an expired message using “%s”", algstr) - continue - } - } -} diff --git a/vendor/github.com/miekg/dns/tsig_test.go b/vendor/github.com/miekg/dns/tsig_test.go deleted file mode 100644 index 4bc52733..00000000 --- a/vendor/github.com/miekg/dns/tsig_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package dns - -import ( - "encoding/binary" - "testing" - "time" -) - -func newTsig(algo string) *Msg { - m := new(Msg) - m.SetQuestion("example.org.", TypeA) - m.SetTsig("example.", algo, 300, time.Now().Unix()) - return m -} - -func TestTsig(t *testing.T) { - m := newTsig(HmacMD5) - buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } - err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } - - // TSIG accounts for ID substitution. This means if the message ID is - // changed by a forwarder, we should still be able to verify the TSIG. - m = newTsig(HmacMD5) - buf, _, err = TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } - - binary.BigEndian.PutUint16(buf[0:2], uint16(42)) - err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } -} - -func TestTsigCase(t *testing.T) { - m := newTsig("HmAc-mD5.sig-ALg.rEg.int.") // HmacMD5 - buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } - err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index dd131094..00000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,271 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. 
-package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, -} - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from type_generate.go - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -// Header() functions -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) - fmt.Fprintf(b, "l := 
rr.Hdr.len()\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: - o("l += len(rr.%s) + 1\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2 + 1\n") - case st.Tag(i) == `dns:"a"`: - o("l += net.IPv4len // %s\n") - case st.Tag(i) == `dns:"aaaa"`: - o("l += net.IPv6len // %s\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l++ // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"*rr.Hdr.copyHeader()"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/types_test.go b/vendor/github.com/miekg/dns/types_test.go deleted file mode 100644 index c117cfbc..00000000 --- a/vendor/github.com/miekg/dns/types_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package dns - -import ( - "testing" -) - -func TestCmToM(t *testing.T) { - s := cmToM(0, 0) - if s != "0.00" { - t.Error("0, 0") - } - - s = cmToM(1, 0) - if s != "0.01" { - t.Error("1, 0") - } - - s = cmToM(3, 1) - if s != "0.30" { - t.Error("3, 1") - } - - s = cmToM(4, 2) - if s != "4" { - t.Error("4, 2") - } - - s = cmToM(5, 3) - if s != "50" { - t.Error("5, 3") - } - - s = cmToM(7, 
5) - if s != "7000" { - t.Error("7, 5") - } - - s = cmToM(9, 9) - if s != "90000000" { - t.Error("9, 9") - } -} - -func TestSplitN(t *testing.T) { - xs := splitN("abc", 5) - if len(xs) != 1 && xs[0] != "abc" { - t.Errorf("Failure to split abc") - } - - s := "" - for i := 0; i < 255; i++ { - s += "a" - } - - xs = splitN(s, 255) - if len(xs) != 1 && xs[0] != s { - t.Errorf("failure to split 255 char long string") - } - - s += "b" - xs = splitN(s, 255) - if len(xs) != 2 || xs[1] != "b" { - t.Errorf("failure to split 256 char long string: %d", len(xs)) - } - - // Make s longer - for i := 0; i < 255; i++ { - s += "a" - } - xs = splitN(s, 255) - if len(xs) != 3 || xs[2] != "a" { - t.Errorf("failure to split 510 char long string: %d", len(xs)) - } -} diff --git a/vendor/github.com/miekg/dns/update_test.go b/vendor/github.com/miekg/dns/update_test.go deleted file mode 100644 index 12760a1e..00000000 --- a/vendor/github.com/miekg/dns/update_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package dns - -import ( - "bytes" - "testing" -) - -func TestDynamicUpdateParsing(t *testing.T) { - prefix := "example.com. IN " - for _, typ := range TypeToString { - if typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || - typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" || - typ == "Reserved" || typ == "None" || typ == "NXT" || typ == "MAILB" || typ == "MAILA" { - continue - } - r, err := NewRR(prefix + typ) - if err != nil { - t.Errorf("failure to parse: %s %s: %v", prefix, typ, err) - } else { - t.Logf("parsed: %s", r.String()) - } - } -} - -func TestDynamicUpdateUnpack(t *testing.T) { - // From https://github.com/miekg/dns/issues/150#issuecomment-62296803 - // It should be an update message for the zone "example.", - // deleting the A RRset "example." and then adding an A record at "example.". - // class ANY, TYPE A - buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1} - msg := new(Msg) - err := msg.Unpack(buf) - if err != nil { - t.Errorf("failed to unpack: %v\n%s", err, msg.String()) - } -} - -func TestDynamicUpdateZeroRdataUnpack(t *testing.T) { - m := new(Msg) - rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0} - m.Answer = []RR{rr, rr, rr, rr, rr} - m.Ns = m.Answer - for n, s := range TypeToString { - rr.Rrtype = n - bytes, err := m.Pack() - if err != nil { - t.Errorf("failed to pack %s: %v", s, err) - continue - } - if err := new(Msg).Unpack(bytes); err != nil { - t.Errorf("failed to unpack %s: %v", s, err) - } - } -} - -func TestRemoveRRset(t *testing.T) { - // Should add a zero data RR in Class ANY with a TTL of 0 - // for each set mentioned in the RRs provided to it. - rr, err := NewRR(". 
100 IN A 127.0.0.1") - if err != nil { - t.Fatalf("error constructing RR: %v", err) - } - m := new(Msg) - m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}} - expectstr := m.String() - expect, err := m.Pack() - if err != nil { - t.Fatalf("error packing expected msg: %v", err) - } - - m.Ns = nil - m.RemoveRRset([]RR{rr}) - actual, err := m.Pack() - if err != nil { - t.Fatalf("error packing actual msg: %v", err) - } - if !bytes.Equal(actual, expect) { - tmp := new(Msg) - if err := tmp.Unpack(actual); err != nil { - t.Fatalf("error unpacking actual msg: %v\nexpected: %v\ngot: %v\n", err, expect, actual) - } - t.Errorf("expected msg:\n%s", expectstr) - t.Errorf("actual msg:\n%v", tmp) - } -} - -func TestPreReqAndRemovals(t *testing.T) { - // Build a list of multiple prereqs and then somes removes followed by an insert. - // We should be able to add multiple prereqs and updates. - m := new(Msg) - m.SetUpdate("example.org.") - m.Id = 1234 - - // Use a full set of RRs each time, so we are sure the rdata is stripped. - rrName1, _ := NewRR("name_used. 3600 IN A 127.0.0.1") - rrName2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1") - rrRemove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1") - rrRemove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1") - rrRemove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1") - rrInsert, _ := NewRR("insert. 3600 IN A 127.0.0.1") - rrRrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1") - rrRrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1") - rrRrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1") - - // Handle the prereqs. - m.NameUsed([]RR{rrName1}) - m.NameNotUsed([]RR{rrName2}) - m.RRsetUsed([]RR{rrRrset1}) - m.Used([]RR{rrRrset2}) - m.RRsetNotUsed([]RR{rrRrset3}) - - // and now the updates. - m.RemoveName([]RR{rrRemove1}) - m.RemoveRRset([]RR{rrRemove2}) - m.Remove([]RR{rrRemove3}) - m.Insert([]RR{rrInsert}) - - // This test function isn't a Example function because we print these RR with tabs at the - // end and the Example function trim these, thus they never match. - // TODO(miek): don't print these tabs and make this into an Example function. - expect := `;; opcode: UPDATE, status: NOERROR, id: 1234 -;; flags:; QUERY: 1, ANSWER: 5, AUTHORITY: 4, ADDITIONAL: 0 - -;; QUESTION SECTION: -;example.org. IN SOA - -;; ANSWER SECTION: -name_used. 0 ANY ANY -name_not_used. 0 NONE ANY -rrset_used1. 0 ANY A -rrset_used2. 3600 IN A 127.0.0.1 -rrset_not_used. 0 NONE A - -;; AUTHORITY SECTION: -remove1. 0 ANY ANY -remove2. 0 ANY A -remove3. 0 NONE A 127.0.0.1 -insert. 3600 IN A 127.0.0.1 -` - - if m.String() != expect { - t.Errorf("expected msg:\n%s", expect) - t.Errorf("actual msg:\n%v", m.String()) - } -} diff --git a/vendor/github.com/miekg/dns/xfr_test.go b/vendor/github.com/miekg/dns/xfr_test.go deleted file mode 100644 index a478963a..00000000 --- a/vendor/github.com/miekg/dns/xfr_test.go +++ /dev/null @@ -1,184 +0,0 @@ -// +build net - -package dns - -import ( - "net" - "strings" - "testing" - "time" -) - -func getIP(s string) string { - a, err := net.LookupAddr(s) - if err != nil { - return "" - } - return a[0] -} - -// flaky, need to setup local server and test from that. 
-func TestAXFR_Miek(t *testing.T) { - // This test runs against a server maintained by Miek - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("miek.nl.") - - server := getIP("linode.atoom.net") - - tr := new(Transfer) - - if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { - t.Fatal("failed to setup axfr: ", err) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - for _, rr := range ex.RR { - t.Log(rr.String()) - } - } - } -} - -// fails. -func TestAXFR_NLNL_MultipleEnvelopes(t *testing.T) { - // This test runs against a server maintained by NLnet Labs - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("nlnetlabs.nl.") - - server := getIP("open.nlnetlabs.nl.") - - tr := new(Transfer) - if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { - t.Fatalf("failed to setup axfr %v for server: %v", err, server) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - } - } -} - -func TestAXFR_Miek_Tsig(t *testing.T) { - // This test runs against a server maintained by Miek - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("example.nl.") - m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) - - tr := new(Transfer) - tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - - if a, err := tr.In(m, "176.58.119.54:53"); err != nil { - t.Fatal("failed to setup axfr: ", err) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - for _, rr := range ex.RR { - t.Log(rr.String()) - } - } - } -} - -func TestAXFR_SIDN_NSD3_NONE(t *testing.T) { testAXFRSIDN(t, "nsd", "") } -func TestAXFR_SIDN_NSD3_MD5(t *testing.T) { testAXFRSIDN(t, "nsd", HmacMD5) } -func TestAXFR_SIDN_NSD3_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA1) } -func TestAXFR_SIDN_NSD3_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA256) } - -func TestAXFR_SIDN_NSD4_NONE(t *testing.T) { testAXFRSIDN(t, "nsd4", "") } -func TestAXFR_SIDN_NSD4_MD5(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacMD5) } -func TestAXFR_SIDN_NSD4_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA1) } -func TestAXFR_SIDN_NSD4_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA256) } - -func TestAXFR_SIDN_BIND9_NONE(t *testing.T) { testAXFRSIDN(t, "bind9", "") } -func TestAXFR_SIDN_BIND9_MD5(t *testing.T) { testAXFRSIDN(t, "bind9", HmacMD5) } -func TestAXFR_SIDN_BIND9_SHA1(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA1) } -func TestAXFR_SIDN_BIND9_SHA256(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA256) } - -func TestAXFR_SIDN_KNOT_NONE(t *testing.T) { testAXFRSIDN(t, "knot", "") } -func TestAXFR_SIDN_KNOT_MD5(t *testing.T) { testAXFRSIDN(t, "knot", HmacMD5) } -func TestAXFR_SIDN_KNOT_SHA1(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA1) } -func TestAXFR_SIDN_KNOT_SHA256(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA256) } - -func TestAXFR_SIDN_POWERDNS_NONE(t *testing.T) { testAXFRSIDN(t, "powerdns", "") } -func TestAXFR_SIDN_POWERDNS_MD5(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacMD5) } -func TestAXFR_SIDN_POWERDNS_SHA1(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA1) } -func TestAXFR_SIDN_POWERDNS_SHA256(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA256) } - -func TestAXFR_SIDN_YADIFA_NONE(t *testing.T) { testAXFRSIDN(t, "yadifa", "") } -func TestAXFR_SIDN_YADIFA_MD5(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacMD5) } -func TestAXFR_SIDN_YADIFA_SHA1(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA1) } -func 
TestAXFR_SIDN_YADIFA_SHA256(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA256) } - -func testAXFRSIDN(t *testing.T, host, alg string) { - // This tests run against a server maintained by SIDN labs, see: - // https://workbench.sidnlabs.nl/ - if testing.Short() { - return - } - x := new(Transfer) - x.TsigSecret = map[string]string{ - "wb_md5.": "Wu/utSasZUkoeCNku152Zw==", - "wb_sha1_longkey.": "uhMpEhPq/RAD9Bt4mqhfmi+7ZdKmjLQb/lcrqYPXR4s/nnbsqw==", - "wb_sha256.": "npfrIJjt/MJOjGJoBNZtsjftKMhkSpIYMv2RzRZt1f8=", - } - keyname := map[string]string{ - HmacMD5: "wb_md5.", - HmacSHA1: "wb_sha1_longkey.", - HmacSHA256: "wb_sha256.", - }[alg] - - m := new(Msg) - m.SetAxfr("types.wb.sidnlabs.nl.") - if keyname != "" { - m.SetTsig(keyname, alg, 300, time.Now().Unix()) - } - c, err := x.In(m, host+".sidnlabs.nl:53") - if err != nil { - t.Fatal(err) - } - for e := range c { - if e.Error != nil { - t.Fatal(e.Error) - } - } -} - -func TestAXFRFailNotAuth(t *testing.T) { - // This tests run against a server maintained by SIDN labs, see: - // https://workbench.sidnlabs.nl/ - if testing.Short() { - return - } - x := new(Transfer) - - m := new(Msg) - m.SetAxfr("sidnlabs.nl.") - c, err := x.In(m, "yadifa.sidnlabs.nl:53") - if err != nil { - t.Fatal(err) - } - for e := range c { - if e.Error != nil { - if !strings.HasPrefix(e.Error.Error(), "dns: bad xfr rcode:") { - t.Fatal(e.Error) - } - } - } -} diff --git a/vendor/github.com/pborman/uuid/marshal_test.go b/vendor/github.com/pborman/uuid/marshal_test.go deleted file mode 100644 index 4e85b6ba..00000000 --- a/vendor/github.com/pborman/uuid/marshal_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "bytes" - "encoding/json" - "reflect" - "testing" -) - -var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") -var testArray = testUUID.Array() - -func TestJSON(t *testing.T) { - type S struct { - ID1 UUID - ID2 UUID - } - s1 := S{ID1: testUUID} - data, err := json.Marshal(&s1) - if err != nil { - t.Fatal(err) - } - var s2 S - if err := json.Unmarshal(data, &s2); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(&s1, &s2) { - t.Errorf("got %#v, want %#v", s2, s1) - } -} - -func TestJSONArray(t *testing.T) { - type S struct { - ID1 Array - ID2 Array - } - s1 := S{ID1: testArray} - data, err := json.Marshal(&s1) - if err != nil { - t.Fatal(err) - } - var s2 S - if err := json.Unmarshal(data, &s2); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(&s1, &s2) { - t.Errorf("got %#v, want %#v", s2, s1) - } -} - -func TestMarshal(t *testing.T) { - data, err := testUUID.MarshalBinary() - if err != nil { - t.Fatalf("MarhsalBinary returned unexpected error %v", err) - } - if !bytes.Equal(data, testUUID) { - t.Fatalf("MarhsalBinary returns %x, want %x", data, testUUID) - } - var u UUID - u.UnmarshalBinary(data) - if !Equal(data, u) { - t.Fatalf("UnmarhsalBinary returns %v, want %v", u, testUUID) - } -} - -func TestMarshalArray(t *testing.T) { - data, err := testArray.MarshalBinary() - if err != nil { - t.Fatalf("MarhsalBinary returned unexpected error %v", err) - } - if !bytes.Equal(data, testUUID) { - t.Fatalf("MarhsalBinary returns %x, want %x", data, testUUID) - } - var a Array - a.UnmarshalBinary(data) - if a != testArray { - t.Fatalf("UnmarhsalBinary returns %v, want %v", a, testArray) - } -} - -func TestMarshalTextArray(t *testing.T) { - data, err := testArray.MarshalText() - if err != nil { - t.Fatalf("MarhsalText returned unexpected error %v", err) - } - var a Array - a.UnmarshalText(data) - if a != testArray { - t.Fatalf("UnmarhsalText returns %v, want %v", a, testArray) - } -} - -func BenchmarkUUID_MarshalJSON(b *testing.B) { - x := &struct { - UUID UUID `json:"uuid"` - }{} - x.UUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if x.UUID == nil { - b.Fatal("invalid uuid") - } - for i := 0; i < b.N; i++ { - js, err := json.Marshal(x) - if err != nil { - b.Fatalf("marshal json: %#v (%v)", js, err) - } - } -} - -func BenchmarkUUID_UnmarshalJSON(b *testing.B) { - js := []byte(`{"uuid":"f47ac10b-58cc-0372-8567-0e02b2c3d479"}`) - var x *struct { - UUID UUID `json:"uuid"` - } - for i := 0; i < b.N; i++ { - err := json.Unmarshal(js, &x) - if err != nil { - b.Fatalf("marshal json: %#v (%v)", js, err) - } - } -} diff --git a/vendor/github.com/pborman/uuid/seq_test.go b/vendor/github.com/pborman/uuid/seq_test.go deleted file mode 100644 index 3b3d1430..00000000 --- a/vendor/github.com/pborman/uuid/seq_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "flag" - "runtime" - "testing" - "time" -) - -// This test is only run when --regressions is passed on the go test line. -var regressions = flag.Bool("regressions", false, "run uuid regression tests") - -// TestClockSeqRace tests for a particular race condition of returning two -// identical Version1 UUIDs. The duration of 1 minute was chosen as the race -// condition, before being fixed, nearly always occured in under 30 seconds. 
-func TestClockSeqRace(t *testing.T) { - if !*regressions { - t.Skip("skipping regression tests") - } - duration := time.Minute - - done := make(chan struct{}) - defer close(done) - - ch := make(chan UUID, 10000) - ncpu := runtime.NumCPU() - switch ncpu { - case 0, 1: - // We can't run the test effectively. - t.Skip("skipping race test, only one CPU detected") - return - default: - runtime.GOMAXPROCS(ncpu) - } - for i := 0; i < ncpu; i++ { - go func() { - for { - select { - case <-done: - return - case ch <- NewUUID(): - } - } - }() - } - - uuids := make(map[string]bool) - cnt := 0 - start := time.Now() - for u := range ch { - s := u.String() - if uuids[s] { - t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s) - return - } - uuids[s] = true - if time.Since(start) > duration { - return - } - cnt++ - } -} diff --git a/vendor/github.com/pborman/uuid/sql_test.go b/vendor/github.com/pborman/uuid/sql_test.go deleted file mode 100644 index 10309515..00000000 --- a/vendor/github.com/pborman/uuid/sql_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "strings" - "testing" -) - -func TestScan(t *testing.T) { - var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479" - var byteTest []byte = Parse(stringTest) - var badTypeTest int = 6 - var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4" - - // sunny day tests - - var uuid UUID - err := (&uuid).Scan(stringTest) - if err != nil { - t.Fatal(err) - } - - err = (&uuid).Scan([]byte(stringTest)) - if err != nil { - t.Fatal(err) - } - - err = (&uuid).Scan(byteTest) - if err != nil { - t.Fatal(err) - } - - // bad type tests - - err = (&uuid).Scan(badTypeTest) - if err == nil { - t.Error("int correctly parsed and shouldn't have") - } - if !strings.Contains(err.Error(), "unable to scan type") { - t.Error("attempting to parse an int returned an incorrect error message") - } - - // invalid/incomplete uuids - - err = (&uuid).Scan(invalidTest) - if err == nil { - t.Error("invalid uuid was parsed without error") - } - if !strings.Contains(err.Error(), "invalid UUID") { - t.Error("attempting to parse an invalid UUID returned an incorrect error message") - } - - err = (&uuid).Scan(byteTest[:len(byteTest)-2]) - if err == nil { - t.Error("invalid byte uuid was parsed without error") - } - if !strings.Contains(err.Error(), "invalid UUID") { - t.Error("attempting to parse an invalid byte UUID returned an incorrect error message") - } - - // empty tests - - uuid = nil - var emptySlice []byte - err = (&uuid).Scan(emptySlice) - if err != nil { - t.Fatal(err) - } - - if uuid != nil { - t.Error("UUID was not nil after scanning empty byte slice") - } - - uuid = nil - var emptyString string - err = (&uuid).Scan(emptyString) - if err != nil { - t.Fatal(err) - } - - if uuid != nil { - t.Error("UUID was not nil after scanning empty string") - } -} - -func TestValue(t *testing.T) { - stringTest := "f47ac10b-58cc-0372-8567-0e02b2c3d479" - uuid := Parse(stringTest) - val, _ := uuid.Value() - if val != stringTest { - t.Error("Value() did not return expected string") - } -} diff --git a/vendor/github.com/pborman/uuid/uuid_test.go b/vendor/github.com/pborman/uuid/uuid_test.go deleted file mode 100644 index 03872396..00000000 --- a/vendor/github.com/pborman/uuid/uuid_test.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "fmt" - "os" - "strings" - "testing" - "time" -) - -type test struct { - in string - version Version - variant Variant - isuuid bool -} - -var tests = []test{ - {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true}, - {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true}, - {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true}, - {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true}, - {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, - {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true}, - {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true}, - {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true}, - {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true}, - {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true}, - {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true}, - {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true}, - {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true}, - {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true}, - {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true}, - {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true}, - - {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, - {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true}, - {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, - {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true}, - {"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true}, - {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true}, - {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true}, - {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true}, - {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true}, - {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true}, - - {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false}, - {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false}, - {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false}, - {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false}, - {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false}, - {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false}, -} - -var constants = []struct { - c interface{} - name string -}{ - {Person, "Person"}, - {Group, "Group"}, - {Org, "Org"}, - {Invalid, "Invalid"}, - {RFC4122, "RFC4122"}, - {Reserved, "Reserved"}, - {Microsoft, "Microsoft"}, - {Future, "Future"}, - {Domain(17), "Domain17"}, - {Variant(42), "BadVariant42"}, -} - -func testTest(t *testing.T, in string, tt test) { - uuid := Parse(in) - if ok := (uuid != nil); ok != tt.isuuid { - t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid) - } - if uuid == nil { - return - } - - if v := uuid.Variant(); v != tt.variant { - t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant) - } - if v, _ := uuid.Version(); v != tt.version { - t.Errorf("Version(%s) got %d 
expected %d\b", in, v, tt.version) - } -} - -func TestUUID(t *testing.T) { - for _, tt := range tests { - testTest(t, tt.in, tt) - testTest(t, strings.ToUpper(tt.in), tt) - } -} - -func TestConstants(t *testing.T) { - for x, tt := range constants { - v, ok := tt.c.(fmt.Stringer) - if !ok { - t.Errorf("%x: %v: not a stringer", x, v) - } else if s := v.String(); s != tt.name { - v, _ := tt.c.(int) - t.Errorf("%x: Constant %T:%d gives %q, expected %q", x, tt.c, v, s, tt.name) - } - } -} - -func TestRandomUUID(t *testing.T) { - m := make(map[string]bool) - for x := 1; x < 32; x++ { - uuid := NewRandom() - s := uuid.String() - if m[s] { - t.Errorf("NewRandom returned duplicated UUID %s", s) - } - m[s] = true - if v, _ := uuid.Version(); v != 4 { - t.Errorf("Random UUID of version %s", v) - } - if uuid.Variant() != RFC4122 { - t.Errorf("Random UUID is variant %d", uuid.Variant()) - } - } -} - -func TestNew(t *testing.T) { - m := make(map[string]bool) - for x := 1; x < 32; x++ { - s := New() - if m[s] { - t.Errorf("New returned duplicated UUID %s", s) - } - m[s] = true - uuid := Parse(s) - if uuid == nil { - t.Errorf("New returned %q which does not decode", s) - continue - } - if v, _ := uuid.Version(); v != 4 { - t.Errorf("Random UUID of version %s", v) - } - if uuid.Variant() != RFC4122 { - t.Errorf("Random UUID is variant %d", uuid.Variant()) - } - } -} - -func clockSeq(t *testing.T, uuid UUID) int { - seq, ok := uuid.ClockSequence() - if !ok { - t.Fatalf("%s: invalid clock sequence", uuid) - } - return seq -} - -func TestClockSeq(t *testing.T) { - // Fake time.Now for this test to return a monotonically advancing time; restore it at end. - defer func(orig func() time.Time) { timeNow = orig }(timeNow) - monTime := time.Now() - timeNow = func() time.Time { - monTime = monTime.Add(1 * time.Second) - return monTime - } - - SetClockSequence(-1) - uuid1 := NewUUID() - uuid2 := NewUUID() - - if clockSeq(t, uuid1) != clockSeq(t, uuid2) { - t.Errorf("clock sequence %d != %d", clockSeq(t, uuid1), clockSeq(t, uuid2)) - } - - SetClockSequence(-1) - uuid2 = NewUUID() - - // Just on the very off chance we generated the same sequence - // two times we try again. 
- if clockSeq(t, uuid1) == clockSeq(t, uuid2) { - SetClockSequence(-1) - uuid2 = NewUUID() - } - if clockSeq(t, uuid1) == clockSeq(t, uuid2) { - t.Errorf("Duplicate clock sequence %d", clockSeq(t, uuid1)) - } - - SetClockSequence(0x1234) - uuid1 = NewUUID() - if seq := clockSeq(t, uuid1); seq != 0x1234 { - t.Errorf("%s: expected seq 0x1234 got 0x%04x", uuid1, seq) - } -} - -func TestCoding(t *testing.T) { - text := "7d444840-9dc0-11d1-b245-5ffdce74fad2" - urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2" - data := UUID{ - 0x7d, 0x44, 0x48, 0x40, - 0x9d, 0xc0, - 0x11, 0xd1, - 0xb2, 0x45, - 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, - } - if v := data.String(); v != text { - t.Errorf("%x: encoded to %s, expected %s", data, v, text) - } - if v := data.URN(); v != urn { - t.Errorf("%x: urn is %s, expected %s", data, v, urn) - } - - uuid := Parse(text) - if !Equal(uuid, data) { - t.Errorf("%s: decoded to %s, expected %s", text, uuid, data) - } -} - -func TestVersion1(t *testing.T) { - uuid1 := NewUUID() - uuid2 := NewUUID() - - if Equal(uuid1, uuid2) { - t.Errorf("%s:duplicate uuid", uuid1) - } - if v, _ := uuid1.Version(); v != 1 { - t.Errorf("%s: version %s expected 1", uuid1, v) - } - if v, _ := uuid2.Version(); v != 1 { - t.Errorf("%s: version %s expected 1", uuid2, v) - } - n1 := uuid1.NodeID() - n2 := uuid2.NodeID() - if !bytes.Equal(n1, n2) { - t.Errorf("Different nodes %x != %x", n1, n2) - } - t1, ok := uuid1.Time() - if !ok { - t.Errorf("%s: invalid time", uuid1) - } - t2, ok := uuid2.Time() - if !ok { - t.Errorf("%s: invalid time", uuid2) - } - q1, ok := uuid1.ClockSequence() - if !ok { - t.Errorf("%s: invalid clock sequence", uuid1) - } - q2, ok := uuid2.ClockSequence() - if !ok { - t.Errorf("%s: invalid clock sequence", uuid2) - } - - switch { - case t1 == t2 && q1 == q2: - t.Error("time stopped") - case t1 > t2 && q1 == q2: - t.Error("time reversed") - case t1 < t2 && q1 != q2: - t.Error("clock sequence chaned unexpectedly") - } -} - -func TestNode(t *testing.T) { - // This test is mostly to make sure we don't leave nodeMu locked. 
- ifname = "" - if ni := NodeInterface(); ni != "" { - t.Errorf("NodeInterface got %q, want %q", ni, "") - } - if SetNodeInterface("xyzzy") { - t.Error("SetNodeInterface succeeded on a bad interface name") - } - if !SetNodeInterface("") { - t.Error("SetNodeInterface failed") - } - if ni := NodeInterface(); ni == "" { - t.Error("NodeInterface returned an empty string") - } - - ni := NodeID() - if len(ni) != 6 { - t.Errorf("ni got %d bytes, want 6", len(ni)) - } - hasData := false - for _, b := range ni { - if b != 0 { - hasData = true - } - } - if !hasData { - t.Error("nodeid is all zeros") - } - - id := []byte{1, 2, 3, 4, 5, 6, 7, 8} - SetNodeID(id) - ni = NodeID() - if !bytes.Equal(ni, id[:6]) { - t.Errorf("got nodeid %v, want %v", ni, id[:6]) - } - - if ni := NodeInterface(); ni != "user" { - t.Errorf("got inteface %q, want %q", ni, "user") - } -} - -func TestNodeAndTime(t *testing.T) { - // Time is February 5, 1998 12:30:23.136364800 AM GMT - - uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") - node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2} - - ts, ok := uuid.Time() - if ok { - c := time.Unix(ts.UnixTime()) - want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC) - if !c.Equal(want) { - t.Errorf("Got time %v, want %v", c, want) - } - } else { - t.Errorf("%s: bad time", uuid) - } - if !bytes.Equal(node, uuid.NodeID()) { - t.Errorf("Expected node %v got %v", node, uuid.NodeID()) - } -} - -func TestMD5(t *testing.T) { - uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String() - want := "6fa459ea-ee8a-3ca4-894e-db77e160355e" - if uuid != want { - t.Errorf("MD5: got %q expected %q", uuid, want) - } -} - -func TestSHA1(t *testing.T) { - uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String() - want := "886313e1-3b8a-5372-9b90-0c9aee199e5d" - if uuid != want { - t.Errorf("SHA1: got %q expected %q", uuid, want) - } -} - -func TestNodeID(t *testing.T) { - nid := []byte{1, 2, 3, 4, 5, 6} - SetNodeInterface("") - s := NodeInterface() - if s == "" || s == "user" { - t.Errorf("NodeInterface %q after SetInteface", s) - } - node1 := NodeID() - if node1 == nil { - t.Error("NodeID nil after SetNodeInterface", s) - } - SetNodeID(nid) - s = NodeInterface() - if s != "user" { - t.Errorf("Expected NodeInterface %q got %q", "user", s) - } - node2 := NodeID() - if node2 == nil { - t.Error("NodeID nil after SetNodeID", s) - } - if bytes.Equal(node1, node2) { - t.Error("NodeID not changed after SetNodeID", s) - } else if !bytes.Equal(nid, node2) { - t.Errorf("NodeID is %x, expected %x", node2, nid) - } -} - -func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) { - if uuid == nil { - t.Errorf("%s failed", name) - return - } - if v, _ := uuid.Version(); v != 2 { - t.Errorf("%s: %s: expected version 2, got %s", name, uuid, v) - return - } - if v, ok := uuid.Domain(); !ok || v != domain { - if !ok { - t.Errorf("%s: %d: Domain failed", name, uuid) - } else { - t.Errorf("%s: %s: expected domain %d, got %d", name, uuid, domain, v) - } - } - if v, ok := uuid.Id(); !ok || v != id { - if !ok { - t.Errorf("%s: %d: Id failed", name, uuid) - } else { - t.Errorf("%s: %s: expected id %d, got %d", name, uuid, id, v) - } - } -} - -func TestDCE(t *testing.T) { - testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678) - testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid())) - testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid())) -} - -type badRand struct{} - -func (r badRand) Read(buf []byte) (int, error) { - for i, _ := range buf { 
- buf[i] = byte(i) - } - return len(buf), nil -} - -func TestBadRand(t *testing.T) { - SetRand(badRand{}) - uuid1 := New() - uuid2 := New() - if uuid1 != uuid2 { - t.Errorf("expected duplicates, got %q and %q", uuid1, uuid2) - } - SetRand(nil) - uuid1 = New() - uuid2 = New() - if uuid1 == uuid2 { - t.Errorf("unexpected duplicates, got %q", uuid1) - } -} - -func TestUUID_Array(t *testing.T) { - expect := Array{ - 0xf4, 0x7a, 0xc1, 0x0b, - 0x58, 0xcc, - 0x03, 0x72, - 0x85, 0x67, - 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, - } - uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if uuid == nil { - t.Fatal("invalid uuid") - } - if uuid.Array() != expect { - t.Fatal("invalid array") - } -} - -func TestArray_UUID(t *testing.T) { - array := Array{ - 0xf4, 0x7a, 0xc1, 0x0b, - 0x58, 0xcc, - 0x03, 0x72, - 0x85, 0x67, - 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, - } - expect := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if expect == nil { - t.Fatal("invalid uuid") - } - if !bytes.Equal(array.UUID(), expect) { - t.Fatal("invalid uuid") - } -} - -func BenchmarkParse(b *testing.B) { - for i := 0; i < b.N; i++ { - uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if uuid == nil { - b.Fatal("invalid uuid") - } - } -} - -func BenchmarkNew(b *testing.B) { - for i := 0; i < b.N; i++ { - New() - } -} - -func BenchmarkUUID_String(b *testing.B) { - uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if uuid == nil { - b.Fatal("invalid uuid") - } - for i := 0; i < b.N; i++ { - if uuid.String() == "" { - b.Fatal("invalid uuid") - } - } -} - -func BenchmarkUUID_URN(b *testing.B) { - uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if uuid == nil { - b.Fatal("invalid uuid") - } - for i := 0; i < b.N; i++ { - if uuid.URN() == "" { - b.Fatal("invalid uuid") - } - } -} - -func BenchmarkUUID_Array(b *testing.B) { - expect := Array{ - 0xf4, 0x7a, 0xc1, 0x0b, - 0x58, 0xcc, - 0x03, 0x72, - 0x85, 0x67, - 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, - } - uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if uuid == nil { - b.Fatal("invalid uuid") - } - for i := 0; i < b.N; i++ { - if uuid.Array() != expect { - b.Fatal("invalid array") - } - } -} - -func BenchmarkArray_UUID(b *testing.B) { - array := Array{ - 0xf4, 0x7a, 0xc1, 0x0b, - 0x58, 0xcc, - 0x03, 0x72, - 0x85, 0x67, - 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, - } - expect := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - if expect == nil { - b.Fatal("invalid uuid") - } - for i := 0; i < b.N; i++ { - if !bytes.Equal(array.UUID(), expect) { - b.Fatal("invalid uuid") - } - } -} diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml new file mode 100644 index 00000000..20dd53b8 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/.travis.yml @@ -0,0 +1,23 @@ +language: go +sudo: false +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + - tip +matrix: + allow_failures: + - go: tip + fast_finish: true +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci +notifications: + email: false diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE new file mode 100644 index 00000000..926d5498 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2018 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in 
the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md new file mode 100644 index 00000000..7b1a722d --- /dev/null +++ b/vendor/github.com/satori/go.uuid/README.md @@ -0,0 +1,65 @@ +# UUID package for Go language + +[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) +[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) +[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) + +This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. + +With 100% test coverage and benchmarks out of box. + +Supported versions: +* Version 1, based on timestamp and MAC address (RFC 4122) +* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) +* Version 3, based on MD5 hashing (RFC 4122) +* Version 4, based on random numbers (RFC 4122) +* Version 5, based on SHA-1 hashing (RFC 4122) + +## Installation + +Use the `go` command: + + $ go get github.com/satori/go.uuid + +## Requirements + +UUID package requires Go >= 1.2. + +## Example + +```go +package main + +import ( + "fmt" + "github.com/satori/go.uuid" +) + +func main() { + // Creating UUID Version 4 + u1 := uuid.NewV4() + fmt.Printf("UUIDv4: %s\n", u1) + + // Parsing UUID from string input + u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + fmt.Printf("Something gone wrong: %s", err) + } + fmt.Printf("Successfully parsed: %s", u2) +} +``` + +## Documentation + +[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. + +## Links +* [RFC 4122](http://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) + +## Copyright + +Copyright (C) 2013-2018 by Maxim Bublis . + +UUID package released under MIT License. +See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. 
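
For reference, a minimal sketch of how the vendored satori/go.uuid API documented in the README above can be used from this project, assuming the pre-1.2 v1.x API added by this patch (NewV4 and NewV5 return a UUID directly, FromString returns an error); the v5 name string below is purely illustrative:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// Random (version 4) UUID, e.g. as a one-off identifier.
	id := uuid.NewV4()
	fmt.Println("v4:", id)

	// Name-based (version 5) UUID derived from the DNS namespace;
	// deterministic for the same input name. The name is a placeholder.
	svc := uuid.NewV5(uuid.NamespaceDNS, "service.example.org")
	fmt.Println("v5:", svc)

	// FromString accepts canonical, braced, hash-like and urn:uuid: forms
	// (see codec.go below) and reports an error for anything else.
	parsed, err := uuid.FromString("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("parsed:", parsed)
}
```
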
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go new file mode 100644 index 00000000..656892c5 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/codec.go @@ -0,0 +1,206 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns UUID converted from raw byte slice input. +// It will return error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (u UUID, err error) { + err = u.UnmarshalBinary(input) + return +} + +// FromBytesOrNil returns UUID converted from raw byte slice input. +// Same behavior as FromBytes, but returns a Nil UUID on error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns UUID parsed from string input. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (u UUID, err error) { + err = u.UnmarshalText([]byte(input)) + return +} + +// FromStringOrNil returns UUID parsed from string input. +// Same behavior as FromString, but returns a Nil UUID on error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Following formats are supported: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// ABNF for supported UUID text representation follows: +// uuid := canonical | hashlike | braced | urn +// plain := canonical | hashlike +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// hashlike := 12hexoct +// braced := '{' plain '}' +// urn := URN ':' UUID-NID ':' plain +// URN := 'urn' +// UUID-NID := 'uuid' +// 12hexoct := 6hexoct 6hexoct +// 6hexoct := 4hexoct 2hexoct +// 4hexoct := 2hexoct 2hexoct +// 2hexoct := hexoct hexoct +// hexoct := hexdig hexdig +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +func (u *UUID) UnmarshalText(text []byte) (err error) { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 36: + return u.decodeCanonical(text) + case 38: + return u.decodeBraced(text) + case 41: + fallthrough + case 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length: %s", text) + } +} + +// decodeCanonical decodes UUID string in format +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". +func (u *UUID) decodeCanonical(t []byte) (err error) { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format %s", t) + } + + src := t[:] + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return +} + +// decodeHashLike decodes UUID string in format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) (err error) { + src := t[:] + dst := u[:] + + if _, err = hex.Decode(dst, src); err != nil { + return err + } + return +} + +// decodeBraced decodes UUID string in format +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format +// "{6ba7b8109dad11d180b400c04fd430c8}". +func (u *UUID) decodeBraced(t []byte) (err error) { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format %s", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID string in format +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeURN(t []byte) (err error) { + total := len(t) + + urn_uuid_prefix := t[:9] + + if !bytes.Equal(urn_uuid_prefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format: %s", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID string in canonical format +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodePlain(t []byte) (err error) { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrrect UUID length: %s", t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() (data []byte, err error) { + data = u.Bytes() + return +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return error if the slice isn't 16 bytes long. 
+func (u *UUID) UnmarshalBinary(data []byte) (err error) { + if len(data) != Size { + err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + return + } + copy(u[:], data) + + return +} diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go new file mode 100644 index 00000000..3f2f1da2 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/generator.go @@ -0,0 +1,239 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "hash" + "net" + "os" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +var ( + global = newDefaultGenerator() + + epochFunc = unixTimeFunc + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// NewV1 returns UUID based on current timestamp and MAC address. +func NewV1() UUID { + return global.NewV1() +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func NewV2(domain byte) UUID { + return global.NewV2(domain) +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return global.NewV3(ns, name) +} + +// NewV4 returns random generated UUID. +func NewV4() UUID { + return global.NewV4() +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return global.NewV5(ns, name) +} + +// Generator provides interface for generating UUIDs. +type Generator interface { + NewV1() UUID + NewV2(domain byte) UUID + NewV3(ns UUID, name string) UUID + NewV4() UUID + NewV5(ns UUID, name string) UUID +} + +// Default generator implementation. +type generator struct { + storageOnce sync.Once + storageMutex sync.Mutex + + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +func newDefaultGenerator() Generator { + return &generator{} +} + +// NewV1 returns UUID based on current timestamp and MAC address. 
+func (g *generator) NewV1() UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func (g *generator) NewV2(domain byte) UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[0:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[0:], posixGID) + } + + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + u[9] = domain + + copy(u[10:], hardwareAddr) + + u.SetVersion(V2) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func (g *generator) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns random generated UUID. +func (g *generator) NewV4() UUID { + u := UUID{} + g.safeRandom(u[:]) + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func (g *generator) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +func (g *generator) initStorage() { + g.initClockSequence() + g.initHardwareAddr() +} + +func (g *generator) initClockSequence() { + buf := make([]byte, 2) + g.safeRandom(buf) + g.clockSequence = binary.BigEndian.Uint16(buf) +} + +func (g *generator) initHardwareAddr() { + interfaces, err := net.Interfaces() + if err == nil { + for _, iface := range interfaces { + if len(iface.HardwareAddr) >= 6 { + copy(g.hardwareAddr[:], iface.HardwareAddr) + return + } + } + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence + g.safeRandom(g.hardwareAddr[:]) + + // Set multicast bit as recommended in RFC 4122 + g.hardwareAddr[0] |= 0x01 +} + +func (g *generator) safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +// Returns UUID v1/v2 storage state. +// Returns epoch timestamp, clock sequence, and hardware address. +func (g *generator) getStorage() (uint64, uint16, []byte) { + g.storageOnce.Do(g.initStorage) + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := epochFunc() + // Clock changed backwards since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, g.hardwareAddr[:] +} + +// Returns difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and current time. +// This is default epoch calculation function. +func unixTimeFunc() uint64 { + return epochStart + uint64(time.Now().UnixNano()/100) +} + +// Returns UUID based on hashing of namespace UUID and name. 
+func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go new file mode 100644 index 00000000..56759d39 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/sql.go @@ -0,0 +1,78 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice is handled by UnmarshalBinary, while +// a longer byte slice or a string is handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. 
+func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go new file mode 100644 index 00000000..a2b8e2ca --- /dev/null +++ b/vendor/github.com/satori/go.uuid/uuid.go @@ -0,0 +1,161 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementation of Universally Unique Identifier (UUID). +// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and +// version 2 (as specified in DCE 1.1). +package uuid + +import ( + "bytes" + "encoding/hex" +) + +// Size of a UUID in bytes. +const Size = 16 + +// UUID representation compliant with specification +// described in RFC 4122. +type UUID [Size]byte + +// UUID versions +const ( + _ byte = iota + V1 + V2 + V3 + V4 + V5 +) + +// UUID layout variants. +const ( + VariantNCS byte = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +// Nil is special form of UUID that is specified to have all +// 128 bits set to zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) +) + +// Equal returns true if u1 and u2 equals, otherwise returns false. +func Equal(u1 UUID, u2 UUID) bool { + return bytes.Equal(u1[:], u2[:]) +} + +// Version returns algorithm version used to generate UUID. +func (u UUID) Version() byte { + return u[6] >> 4 +} + +// Variant returns UUID layout variant. +func (u UUID) Variant() byte { + switch { + case (u[8] >> 7) == 0x00: + return VariantNCS + case (u[8] >> 6) == 0x02: + return VariantRFC4122 + case (u[8] >> 5) == 0x06: + return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture + } +} + +// Bytes returns bytes slice representation of UUID. 
+func (u UUID) Bytes() []byte { + return u[:] +} + +// Returns canonical string representation of UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// SetVersion sets version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")); +func Must(u UUID, err error) UUID { + if err != nil { + panic(err) + } + return u +} diff --git a/vendor/github.com/syndtr/goleveldb/.travis.yml b/vendor/github.com/syndtr/goleveldb/.travis.yml deleted file mode 100644 index 82de3773..00000000 --- a/vendor/github.com/syndtr/goleveldb/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip - -script: - - go test -timeout 1h ./... - - go test -timeout 30m -race -run "TestDB_(Concurrent|GoleveldbIssue74)" ./leveldb diff --git a/vendor/github.com/syndtr/goleveldb/README.md b/vendor/github.com/syndtr/goleveldb/README.md deleted file mode 100644 index 9b43dfec..00000000 --- a/vendor/github.com/syndtr/goleveldb/README.md +++ /dev/null @@ -1,107 +0,0 @@ -This is an implementation of the [LevelDB key/value database](http:code.google.com/p/leveldb) in the [Go programming language](http:golang.org). - -[![Build Status](https://travis-ci.org/syndtr/goleveldb.png?branch=master)](https://travis-ci.org/syndtr/goleveldb) - -Installation ------------ - - go get github.com/syndtr/goleveldb/leveldb - -Requirements ------------ - -* Need at least `go1.4` or newer. - -Usage ------------ - -Create or open a database: -```go -// The returned DB instance is safe for concurrent use. Which mean that all -// DB's methods may be called concurrently from multiple goroutine. -db, err := leveldb.OpenFile("path/to/db", nil) -... -defer db.Close() -... -``` -Read or modify the database content: -```go -// Remember that the contents of the returned slice should not be modified. -data, err := db.Get([]byte("key"), nil) -... -err = db.Put([]byte("key"), []byte("value"), nil) -... -err = db.Delete([]byte("key"), nil) -... -``` - -Iterate over database content: -```go -iter := db.NewIterator(nil, nil) -for iter.Next() { - // Remember that the contents of the returned slice should not be modified, and - // only valid until the next call to Next. - key := iter.Key() - value := iter.Value() - ... -} -iter.Release() -err = iter.Error() -... -``` -Seek-then-Iterate: -```go -iter := db.NewIterator(nil, nil) -for ok := iter.Seek(key); ok; ok = iter.Next() { - // Use key/value. - ... -} -iter.Release() -err = iter.Error() -... 
-``` -Iterate over subset of database content: -```go -iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -for iter.Next() { - // Use key/value. - ... -} -iter.Release() -err = iter.Error() -... -``` -Iterate over subset of database content with a particular prefix: -```go -iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) -for iter.Next() { - // Use key/value. - ... -} -iter.Release() -err = iter.Error() -... -``` -Batch writes: -```go -batch := new(leveldb.Batch) -batch.Put([]byte("foo"), []byte("value")) -batch.Put([]byte("bar"), []byte("another value")) -batch.Delete([]byte("baz")) -err = db.Write(batch, nil) -... -``` -Use bloom filter: -```go -o := &opt.Options{ - Filter: filter.NewBloomFilter(10), -} -db, err := leveldb.OpenFile("path/to/db", o) -... -defer db.Close() -... -``` -Documentation ------------ - -You can read package documentation [here](http:godoc.org/github.com/syndtr/goleveldb). diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go deleted file mode 100644 index 62775f77..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "testing" - "testing/quick" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestBatchHeader(t *testing.T) { - f := func(seq uint64, length uint32) bool { - encoded := encodeBatchHeader(nil, seq, int(length)) - decSeq, decLength, err := decodeBatchHeader(encoded) - return err == nil && decSeq == seq && decLength == int(length) - } - config := &quick.Config{ - Rand: testutil.NewRand(), - } - if err := quick.Check(f, config); err != nil { - t.Error(err) - } -} - -type batchKV struct { - kt keyType - k, v []byte -} - -func TestBatch(t *testing.T) { - var ( - kvs []batchKV - internalLen int - ) - batch := new(Batch) - rbatch := new(Batch) - abatch := new(Batch) - testBatch := func(i int, kt keyType, k, v []byte) error { - kv := kvs[i] - if kv.kt != kt { - return fmt.Errorf("invalid key type, index=%d: %d vs %d", i, kv.kt, kt) - } - if !bytes.Equal(kv.k, k) { - return fmt.Errorf("invalid key, index=%d", i) - } - if !bytes.Equal(kv.v, v) { - return fmt.Errorf("invalid value, index=%d", i) - } - return nil - } - f := func(ktr uint8, k, v []byte) bool { - kt := keyType(ktr % 2) - if kt == keyTypeVal { - batch.Put(k, v) - rbatch.Put(k, v) - kvs = append(kvs, batchKV{kt: kt, k: k, v: v}) - internalLen += len(k) + len(v) + 8 - } else { - batch.Delete(k) - rbatch.Delete(k) - kvs = append(kvs, batchKV{kt: kt, k: k}) - internalLen += len(k) + 8 - } - if batch.Len() != len(kvs) { - t.Logf("batch.Len: %d vs %d", len(kvs), batch.Len()) - return false - } - if batch.internalLen != internalLen { - t.Logf("abatch.internalLen: %d vs %d", internalLen, batch.internalLen) - return false - } - if len(kvs)%1000 == 0 { - if err := batch.replayInternal(testBatch); err != nil { - t.Logf("batch.replayInternal: %v", err) - return false - } - - abatch.append(rbatch) - rbatch.Reset() - if abatch.Len() != len(kvs) { - t.Logf("abatch.Len: %d vs %d", len(kvs), abatch.Len()) - return false - } - if abatch.internalLen != internalLen { - t.Logf("abatch.internalLen: %d vs %d", internalLen, abatch.internalLen) - return false - } - if err := abatch.replayInternal(testBatch); err != 
nil { - t.Logf("abatch.replayInternal: %v", err) - return false - } - - nbatch := new(Batch) - if err := nbatch.Load(batch.Dump()); err != nil { - t.Logf("nbatch.Load: %v", err) - return false - } - if nbatch.Len() != len(kvs) { - t.Logf("nbatch.Len: %d vs %d", len(kvs), nbatch.Len()) - return false - } - if nbatch.internalLen != internalLen { - t.Logf("nbatch.internalLen: %d vs %d", internalLen, nbatch.internalLen) - return false - } - if err := nbatch.replayInternal(testBatch); err != nil { - t.Logf("nbatch.replayInternal: %v", err) - return false - } - } - if len(kvs)%10000 == 0 { - nbatch := new(Batch) - if err := batch.Replay(nbatch); err != nil { - t.Logf("batch.Replay: %v", err) - return false - } - if nbatch.Len() != len(kvs) { - t.Logf("nbatch.Len: %d vs %d", len(kvs), nbatch.Len()) - return false - } - if nbatch.internalLen != internalLen { - t.Logf("nbatch.internalLen: %d vs %d", internalLen, nbatch.internalLen) - return false - } - if err := nbatch.replayInternal(testBatch); err != nil { - t.Logf("nbatch.replayInternal: %v", err) - return false - } - } - return true - } - config := &quick.Config{ - MaxCount: 40000, - Rand: testutil.NewRand(), - } - if err := quick.Check(f, config); err != nil { - t.Error(err) - } - t.Logf("length=%d internalLen=%d", len(kvs), internalLen) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go deleted file mode 100644 index 435250c7..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "sync/atomic" - "testing" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func randomString(r *rand.Rand, n int) []byte { - b := new(bytes.Buffer) - for i := 0; i < n; i++ { - b.WriteByte(' ' + byte(r.Intn(95))) - } - return b.Bytes() -} - -func compressibleStr(r *rand.Rand, frac float32, n int) []byte { - nn := int(float32(n) * frac) - rb := randomString(r, nn) - b := make([]byte, 0, n+nn) - for len(b) < n { - b = append(b, rb...) - } - return b[:n] -} - -type valueGen struct { - src []byte - pos int -} - -func newValueGen(frac float32) *valueGen { - v := new(valueGen) - r := rand.New(rand.NewSource(301)) - v.src = make([]byte, 0, 1048576+100) - for len(v.src) < 1048576 { - v.src = append(v.src, compressibleStr(r, frac, 100)...) 
- } - return v -} - -func (v *valueGen) get(n int) []byte { - if v.pos+n > len(v.src) { - v.pos = 0 - } - v.pos += n - return v.src[v.pos-n : v.pos] -} - -var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) - -type dbBench struct { - b *testing.B - stor storage.Storage - db *DB - - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions - - keys, values [][]byte -} - -func openDBBench(b *testing.B, noCompress bool) *dbBench { - _, err := os.Stat(benchDB) - if err == nil { - err = os.RemoveAll(benchDB) - if err != nil { - b.Fatal("cannot remove old db: ", err) - } - } - - p := &dbBench{ - b: b, - o: &opt.Options{}, - ro: &opt.ReadOptions{}, - wo: &opt.WriteOptions{}, - } - p.stor, err = storage.OpenFile(benchDB, false) - if err != nil { - b.Fatal("cannot open stor: ", err) - } - if noCompress { - p.o.Compression = opt.NoCompression - } - - p.db, err = Open(p.stor, p.o) - if err != nil { - b.Fatal("cannot open db: ", err) - } - - return p -} - -func (p *dbBench) reopen() { - p.db.Close() - var err error - p.db, err = Open(p.stor, p.o) - if err != nil { - p.b.Fatal("Reopen: got error: ", err) - } -} - -func (p *dbBench) populate(n int) { - p.keys, p.values = make([][]byte, n), make([][]byte, n) - v := newValueGen(0.5) - for i := range p.keys { - p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) - } -} - -func (p *dbBench) randomize() { - m := len(p.keys) - times := m * 2 - r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) - for n := 0; n < times; n++ { - i, j := r1.Int()%m, r2.Int()%m - if i == j { - continue - } - p.keys[i], p.keys[j] = p.keys[j], p.keys[i] - p.values[i], p.values[j] = p.values[j], p.values[i] - } -} - -func (p *dbBench) writes(perBatch int) { - b := p.b - db := p.db - - n := len(p.keys) - m := n / perBatch - if n%perBatch > 0 { - m++ - } - batches := make([]Batch, m) - j := 0 - for i := range batches { - first := true - for ; j < n && ((j+1)%perBatch != 0 || first); j++ { - first = false - batches[i].Put(p.keys[j], p.values[j]) - } - } - runtime.GC() - - b.ResetTimer() - b.StartTimer() - for i := range batches { - err := db.Write(&(batches[i]), p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) gc() { - p.keys, p.values = nil, nil - runtime.GC() -} - -func (p *dbBench) puts() { - b := p.b - db := p.db - - b.ResetTimer() - b.StartTimer() - for i := range p.keys { - err := db.Put(p.keys[i], p.values[i], p.wo) - if err != nil { - b.Fatal("put failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) fill() { - b := p.b - db := p.db - - perBatch := 10000 - batch := new(Batch) - for i, n := 0, len(p.keys); i < n; { - first := true - for ; i < n && ((i+1)%perBatch != 0 || first); i++ { - first = false - batch.Put(p.keys[i], p.values[i]) - } - err := db.Write(batch, p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - batch.Reset() - } -} - -func (p *dbBench) gets() { - b := p.b - db := p.db - - b.ResetTimer() - for i := range p.keys { - _, err := db.Get(p.keys[i], p.ro) - if err != nil { - b.Error("got error: ", err) - } - } - b.StopTimer() -} - -func (p *dbBench) seeks() { - b := p.b - - iter := p.newIter() - defer iter.Release() - b.ResetTimer() - for i := range p.keys { - if !iter.Seek(p.keys[i]) { - b.Error("value not found for: ", string(p.keys[i])) - } - } - b.StopTimer() -} - -func (p *dbBench) newIter() iterator.Iterator { - iter := p.db.NewIterator(nil, p.ro) - err := 
iter.Error() - if err != nil { - p.b.Fatal("cannot create iterator: ", err) - } - return iter -} - -func (p *dbBench) close() { - if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { - p.b.Log("Block pool stats: ", bp) - } - p.db.Close() - p.stor.Close() - os.RemoveAll(benchDB) - p.db = nil - p.keys = nil - p.values = nil - runtime.GC() -} - -func BenchmarkDBWrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatch(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatchUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBWriteRandomSync(b *testing.B) { - p := openDBBench(b, false) - p.wo.Sync = true - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBPut(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.puts() - p.close() -} - -func BenchmarkDBRead(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadGC(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverse(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverseTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBSeek(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.seeks() - p.close() -} - -func BenchmarkDBSeekRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.seeks() - p.close() -} - -func BenchmarkDBGet(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gets() - p.close() -} - -func BenchmarkDBGetRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - 
p.fill() - p.randomize() - p.gets() - p.close() -} - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go deleted file mode 100644 index 89aef69a..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package cache - -import ( - "math/rand" - "testing" - "time" -) - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go index c5940b23..c36ad323 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go @@ -331,7 +331,6 @@ func (r *Cache) delete(n *Node) bool { return deleted } } - return false } // Nodes returns number of 'cache node' in the map. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go deleted file mode 100644 index 6b017bd0..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package cache - -import ( - "math/rand" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" -) - -type int32o int32 - -func (o *int32o) acquire() { - if atomic.AddInt32((*int32)(o), 1) != 1 { - panic("BUG: invalid ref") - } -} - -func (o *int32o) Release() { - if atomic.AddInt32((*int32)(o), -1) != 0 { - panic("BUG: invalid ref") - } -} - -type releaserFunc struct { - fn func() - value Value -} - -func (r releaserFunc) Release() { - if r.fn != nil { - r.fn() - } -} - -func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle { - return c.Get(ns, key, func() (int, Value) { - if relf != nil { - return charge, releaserFunc{relf, value} - } - return charge, value - }) -} - -type cacheMapTestParams struct { - nobjects, nhandles, concurrent, repeat int -} - -func TestCacheMap(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - var params []cacheMapTestParams - if testing.Short() { - params = []cacheMapTestParams{ - {1000, 100, 20, 3}, - {10000, 300, 50, 10}, - } - } else { - params = []cacheMapTestParams{ - {10000, 400, 50, 3}, - {100000, 1000, 100, 10}, - } - } - - var ( - objects [][]int32o - handles [][]unsafe.Pointer - ) - - for _, x := range params { - objects = append(objects, make([]int32o, x.nobjects)) - handles = append(handles, make([]unsafe.Pointer, x.nhandles)) - } - - c := NewCache(nil) - - wg := new(sync.WaitGroup) - var done int32 - - for ns, x := range params { - for i := 0; i < x.concurrent; i++ { - wg.Add(1) - go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for j := len(objects) * repeat; j >= 0; j-- { - key := uint64(r.Intn(len(objects))) - h := c.Get(uint64(ns), key, func() (int, Value) { - o := &objects[key] - o.acquire() - return 1, o - }) - if v := h.Value().(*int32o); v != &objects[key] { - t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v) - } - if objects[key] != 1 { - t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key]) - } - if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) { - h.Release() - } - } - }(ns, i, x.repeat, objects[ns], handles[ns]) - } - - go func(handles []unsafe.Pointer) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for atomic.LoadInt32(&done) == 0 { - i := r.Intn(len(handles)) - h := (*Handle)(atomic.LoadPointer(&handles[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) { - h.Release() - } - time.Sleep(time.Millisecond) - } - }(handles[ns]) - } - - go func() { - handles := make([]*Handle, 100000) - for atomic.LoadInt32(&done) == 0 { - for i := range handles { - handles[i] = c.Get(999999999, uint64(i), func() (int, Value) { - return 1, 1 - }) - } - for _, h := range handles { - h.Release() - } - } - }() - - wg.Wait() - - atomic.StoreInt32(&done, 1) - - for _, handles0 := range handles { - for i := range handles0 { - h := (*Handle)(atomic.LoadPointer(&handles0[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) { - h.Release() - } - } - } - - for ns, objects0 := range objects { - for i, o := range objects0 { - if o != 0 { - t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o) - } - } - } -} - -func TestCacheMap_NodesAndSize(t *testing.T) { - c := NewCache(nil) - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } - set(c, 0, 1, 1, 
1, nil) - set(c, 0, 2, 2, 2, nil) - set(c, 1, 1, 3, 3, nil) - set(c, 2, 1, 4, 1, nil) - if c.Nodes() != 4 { - t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) - } - if c.Size() != 7 { - t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size()) - } -} - -func TestLRUCache_Capacity(t *testing.T) { - c := NewCache(NewLRU(10)) - if c.Capacity() != 10 { - t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity()) - } - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 2, nil).Release() - set(c, 1, 1, 3, 3, nil).Release() - set(c, 2, 1, 4, 1, nil).Release() - set(c, 2, 2, 5, 1, nil).Release() - set(c, 2, 3, 6, 1, nil).Release() - set(c, 2, 4, 7, 1, nil).Release() - set(c, 2, 5, 8, 1, nil).Release() - if c.Nodes() != 7 { - t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes()) - } - if c.Size() != 10 { - t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size()) - } - c.SetCapacity(9) - if c.Capacity() != 9 { - t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity()) - } - if c.Nodes() != 6 { - t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes()) - } - if c.Size() != 8 { - t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size()) - } -} - -func TestCacheMap_NilValue(t *testing.T) { - c := NewCache(NewLRU(10)) - h := c.Get(0, 0, func() (size int, value Value) { - return 1, nil - }) - if h != nil { - t.Error("cache handle is non-nil") - } - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } -} - -func TestLRUCache_GetLatency(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - const ( - concurrentSet = 30 - concurrentGet = 3 - duration = 3 * time.Second - delay = 3 * time.Millisecond - maxkey = 100000 - ) - - var ( - set, getHit, getAll int32 - getMaxLatency, getDuration int64 - ) - - c := NewCache(NewLRU(5000)) - wg := &sync.WaitGroup{} - until := time.Now().Add(duration) - for i := 0; i < concurrentSet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for time.Now().Before(until) { - c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) { - time.Sleep(delay) - atomic.AddInt32(&set, 1) - return 1, 1 - }).Release() - } - }(i) - } - for i := 0; i < concurrentGet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for { - mark := time.Now() - if mark.Before(until) { - h := c.Get(0, uint64(r.Intn(maxkey)), nil) - latency := int64(time.Now().Sub(mark)) - m := atomic.LoadInt64(&getMaxLatency) - if latency > m { - atomic.CompareAndSwapInt64(&getMaxLatency, m, latency) - } - atomic.AddInt64(&getDuration, latency) - if h != nil { - atomic.AddInt32(&getHit, 1) - h.Release() - } - atomic.AddInt32(&getAll, 1) - } else { - break - } - } - }(i) - } - - wg.Wait() - getAvglatency := time.Duration(getDuration) / time.Duration(getAll) - t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v", - set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency) - - if getAvglatency > delay/3 { - t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency) - } -} - -func TestLRUCache_HitMiss(t *testing.T) { - cases := []struct { - key uint64 - value string - }{ - {1, "vvvvvvvvv"}, - {100, "v1"}, - {0, "v2"}, - {12346, "v3"}, - {777, "v4"}, - {999, "v5"}, - {7654, "v6"}, - {2, "v7"}, - {3, "v8"}, - {9, "v9"}, - } - - setfin := 0 - c := NewCache(NewLRU(1000)) - for i, x := range cases { - 
set(c, 0, x.key, x.value, len(x.value), func() { - setfin++ - }).Release() - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j <= i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - for i, x := range cases { - finalizerOk := false - c.Delete(0, x.key, func() { - finalizerOk = true - }) - - if !finalizerOk { - t.Errorf("case %d delete finalizer not executed", i) - } - - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j > i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - if setfin != len(cases) { - t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) - } -} - -func TestLRUCache_Eviction(t *testing.T) { - c := NewCache(NewLRU(12)) - o1 := set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 1, nil).Release() - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - set(c, 0, 5, 5, 1, nil).Release() - if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2 - h.Release() - } - set(c, 0, 9, 9, 10, nil).Release() // 5,2,9 - - for _, key := range []uint64{9, 2, 5, 1} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - o1.Release() - for _, key := range []uint64{1, 2, 5} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - for _, key := range []uint64{3, 4, 9} { - h := c.Get(0, key, nil) - if h != nil { - t.Errorf("hit for key '%d'", key) - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } -} - -func TestLRUCache_Evict(t *testing.T) { - c := NewCache(NewLRU(6)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - set(c, 1, 1, 4, 1, nil).Release() - set(c, 1, 2, 5, 1, nil).Release() - set(c, 2, 1, 6, 1, nil).Release() - set(c, 2, 2, 7, 1, nil).Release() - - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d.%d return nil", ns, key) - } - } - } - - if ok := c.Evict(0, 1); !ok { - t.Error("first Cache.Evict on #0.1 return false") - } - if ok := c.Evict(0, 1); ok { - t.Error("second Cache.Evict on #0.1 return true") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value()) - } - - c.EvictNS(1) - if h := c.Get(1, 1, nil); h != nil { - t.Errorf("Cache.Get on #1.1 return non-nil: %v", 
h.Value()) - } - if h := c.Get(1, 2, nil); h != nil { - t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value()) - } - - c.EvictAll() - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value()) - } - } - } -} - -func TestLRUCache_Delete(t *testing.T) { - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - - if ok := c.Delete(0, 1, delFunc); !ok { - t.Error("Cache.Delete on #1 return false") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value()) - } - if ok := c.Delete(0, 1, delFunc); ok { - t.Error("Cache.Delete on #1 return true") - } - - h2 := c.Get(0, 2, nil) - if h2 == nil { - t.Error("Cache.Get on #2 return nil") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(1) Cache.Delete on #2 return false") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(2) Cache.Delete on #2 return false") - } - - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - c.Get(0, 2, nil).Release() - - for key := 2; key <= 4; key++ { - if h := c.Get(0, uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d return nil", key) - } - } - - h2.Release() - if h := c.Get(0, 2, nil); h != nil { - t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value()) - } - - if delFuncCalled != 4 { - t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled) - } -} - -func TestLRUCache_Close(t *testing.T) { - relFuncCalled := 0 - relFunc := func() { - relFuncCalled++ - } - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, relFunc).Release() - set(c, 0, 2, 2, 1, relFunc).Release() - - h3 := set(c, 0, 3, 3, 1, relFunc) - if h3 == nil { - t.Error("Cache.Get on #3 return nil") - } - if ok := c.Delete(0, 3, delFunc); !ok { - t.Error("Cache.Delete on #3 return false") - } - - c.Close() - - if relFuncCalled != 3 { - t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled) - } - if delFuncCalled != 1 { - t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go index 14dddf88..abf9fb65 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go @@ -29,7 +29,7 @@ func (bytesComparer) Separator(dst, a, b []byte) []byte { // Do not shorten if one string is a prefix of the other } else if c := a[i]; c < 0xff && c+1 < b[i] { dst = append(dst, a[:i+1]...) - dst[i]++ + dst[len(dst)-1]++ return dst } return nil @@ -39,7 +39,7 @@ func (bytesComparer) Successor(dst, b []byte) []byte { for i, c := range b { if c != 0xff { dst = append(dst, b[:i+1]...) - dst[i]++ + dst[len(dst)-1]++ return dst } } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go index 14a28f16..2c522db2 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go @@ -36,7 +36,7 @@ type Comparer interface { // by any users of this package. 
Name() string - // Bellow are advanced functions used used to reduce the space requirements + // Bellow are advanced functions used to reduce the space requirements // for internal data structures such as index blocks. // Separator appends a sequence of bytes x to dst such that a <= x && x < b, diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go deleted file mode 100644 index fef2026c..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "io" - "math/rand" - "testing" - - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -const ctValSize = 1000 - -type dbCorruptHarness struct { - dbHarness -} - -func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { - h := new(dbCorruptHarness) - h.init(t, o) - return h -} - -func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { - return newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - }) -} - -func (h *dbCorruptHarness) recover() { - p := &h.dbHarness - t := p.t - - var err error - p.db, err = Recover(h.stor, h.o) - if err != nil { - t.Fatal("Repair: got error: ", err) - } -} - -func (h *dbCorruptHarness) build(n int) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := range rnd.Perm(n) { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Delete(tkey(rnd.Intn(max))) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) { - p := &h.dbHarness - t := p.t - - fds, _ := p.stor.List(ft) - sortFds(fds) - if fi < 0 { - fi = len(fds) - 1 - } - if fi >= len(fds) { - t.Fatalf("no such file with type %q with index %d", ft, fi) - } - - fd := fds[fi] - r, err := h.stor.Open(fd) - if err != nil { - t.Fatal("cannot open file: ", err) - } - x, err := r.Seek(0, 2) - if err != nil { - t.Fatal("cannot query file size: ", err) - } - m := int(x) - if _, err := r.Seek(0, 0); err != nil { - t.Fatal(err) - } - - if offset < 0 { - if -offset > m { - offset = 0 - } else { - offset = m + offset - } - } - if offset > m { - offset = m - } - if offset+n > m { - n = m - offset - } - - buf := make([]byte, m) - _, err = io.ReadFull(r, buf) - if err != nil { - t.Fatal("cannot read file: ", err) - } - r.Close() - - for i := 0; i < n; i++ { - buf[offset+i] ^= 0x80 - } - - err = h.stor.Remove(fd) - if err != nil { - t.Fatal("cannot remove old file: ", err) - } - w, err := h.stor.Create(fd) - if err != nil { - t.Fatal("cannot 
create new file: ", err) - } - _, err = w.Write(buf) - if err != nil { - t.Fatal("cannot write new file: ", err) - } - w.Close() -} - -func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - fds, err := h.stor.List(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, fd := range fds { - if err := h.stor.Remove(fd); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) { - fds, err := h.stor.List(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, fd := range fds { - if err := h.stor.ForceRemove(fd); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - fds, err := h.stor.List(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - fd := fds[rand.Intn(len(fds))] - h.t.Logf("removing file @%d", fd.Num) - if err := h.stor.Remove(fd); err != nil { - h.t.Error("remove file: ", err) - } -} - -func (h *dbCorruptHarness) check(min, max int) { - p := &h.dbHarness - t := p.t - db := p.db - - var n, badk, badv, missed, good int - iter := db.NewIterator(nil, p.ro) - for iter.Next() { - k := 0 - fmt.Sscanf(string(iter.Key()), "%d", &k) - if k < n { - badk++ - continue - } - missed += k - n - n = k + 1 - if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { - badv++ - } else { - good++ - } - } - err := iter.Error() - iter.Release() - t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", - min, max, good, badk, badv, missed, err) - if good < min || good > max { - t.Errorf("good entries number not in range") - } -} - -func TestCorruptDB_Journal(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.build(100) - h.check(100, 100) - h.closeDB() - h.corrupt(storage.TypeJournal, -1, 19, 1) - h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1) - - h.openDB() - h.check(36, 36) -} - -func TestCorruptDB_Table(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.build(100) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(99, 99) -} - -func TestCorruptDB_TableIndex(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.build(10000) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, -2000, 500) - - h.openDB() - h.check(5000, 9999) -} - -func TestCorruptDB_MissingManifest(t *testing.T) { - rnd := rand.New(rand.NewSource(0x0badda7a)) - h := newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - WriteBuffer: 1000 * 60, - }) - defer h.close() - - h.build(1000) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.closeDB() - - h.forceRemoveAll(storage.TypeManifest) - h.openAssert(false) - - h.recover() - h.check(1000, 1000) - h.build(1000) - h.compactMem() - h.compactRange("", "") - h.closeDB() - - h.recover() - h.check(1000, 1000) -} - -func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.put("foo", "v4") - h.put("foo", "v5") - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") -} - -func 
TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.compactMem() - h.put("foo", "v4") - h.put("foo", "v5") - h.compactMem() - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") -} - -func TestCorruptDB_CorruptedManifest(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.put("foo", "hello") - h.compactMem() - h.compactRange("", "") - h.closeDB() - h.corrupt(storage.TypeManifest, -1, 0, 1000) - h.openAssert(false) - - h.recover() - h.getVal("foo", "hello") -} - -func TestCorruptDB_CompactionInputError(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(9, 9) - - h.build(10000) - h.check(10000, 10000) -} - -func TestCorruptDB_UnrelatedKeys(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.put(string(tkey(1000)), string(tval(1000, ctValSize))) - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - h.compactMem() - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) -} - -func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(1, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") -} - -func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(0, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") -} - -func TestCorruptDB_MissingTableFiles(t *testing.T) { - h := newDbCorruptHarness(t) - defer h.close() - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("c", "v2") - h.put("d", "v2") - h.compactMem() - h.put("e", "v3") - h.put("f", "v3") - h.closeDB() - - h.removeOne(storage.TypeTable) - h.openAssert(false) -} - -func TestCorruptDB_RecoverTable(t *testing.T) { - h := newDbCorruptHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - h.build(1000) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - seq := h.db.seq - h.closeDB() - h.corrupt(storage.TypeTable, 0, 1000, 1) - h.corrupt(storage.TypeTable, 3, 10000, 1) - // Corrupted filter shouldn't affect recovery. 
- h.corrupt(storage.TypeTable, 3, 113888, 10) - h.corrupt(storage.TypeTable, -1, 20000, 1) - - h.recover() - if h.db.seq != seq { - t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) - } - h.check(985, 985) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go index b0cdcb3d..90fedf7b 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go @@ -32,6 +32,12 @@ type DB struct { // Need 64-bit alignment. seq uint64 + // Stats. Need 64-bit alignment. + cWriteDelay int64 // The cumulative duration of write delays + cWriteDelayN int32 // The cumulative number of write delays + inWritePaused int32 // The indicator whether write operation is paused by compaction + aliveSnaps, aliveIters int32 + // Session. s *session @@ -49,9 +55,6 @@ type DB struct { snapsMu sync.Mutex snapsList *list.List - // Stats. - aliveSnaps, aliveIters int32 - // Write. batchPool sync.Pool writeMergeC chan writeMerge @@ -179,7 +182,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { err = s.recover() if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { + if !os.IsNotExist(err) || s.o.GetErrorIfMissing() || s.o.GetReadOnly() { return } err = s.create() @@ -321,7 +324,7 @@ func recoverTable(s *session, o *opt.Options) error { } } err = iter.Error() - if err != nil { + if err != nil && !errors.IsCorrupted(err) { return } err = tw.Close() @@ -392,7 +395,7 @@ func recoverTable(s *session, o *opt.Options) error { } imax = append(imax[:0], key...) } - if err := iter.Error(); err != nil { + if err := iter.Error(); err != nil && !errors.IsCorrupted(err) { iter.Release() return err } @@ -869,6 +872,10 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { // DB. And a nil Range.Limit is treated as a key after all keys in // the DB. // +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Key() methods), its content should not be modified +// unless noted otherwise. +// // The iterator must be released after use, by calling Release method. // // Also read Iterator documentation of the leveldb/iterator package. @@ -904,6 +911,10 @@ func (db *DB) GetSnapshot() (*Snapshot, error) { // Returns the number of files at level 'n'. // leveldb.stats // Returns statistics of the underlying DB. +// leveldb.iostats +// Returns statistics of effective disk read and write. +// leveldb.writedelay +// Returns cumulative write delay caused by compaction. // leveldb.sstables // Returns sstables list for each level. 
// leveldb.blockpool @@ -955,6 +966,14 @@ func (db *DB) GetProperty(name string) (value string, err error) { level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), float64(read)/1048576.0, float64(write)/1048576.0) } + case p == "iostats": + value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f", + float64(db.s.stor.reads())/1048576.0, + float64(db.s.stor.writes())/1048576.0) + case p == "writedelay": + writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + paused := atomic.LoadInt32(&db.inWritePaused) == 1 + value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused) case p == "sstables": for level, tables := range v.levels { value += fmt.Sprintf("--- level %d ---\n", level) @@ -983,6 +1002,75 @@ func (db *DB) GetProperty(name string) (value string, err error) { return } +// DBStats is database statistics. +type DBStats struct { + WriteDelayCount int32 + WriteDelayDuration time.Duration + WritePaused bool + + AliveSnapshots int32 + AliveIterators int32 + + IOWrite uint64 + IORead uint64 + + BlockCacheSize int + OpenedTablesCount int + + LevelSizes []int64 + LevelTablesCounts []int + LevelRead []int64 + LevelWrite []int64 + LevelDurations []time.Duration +} + +// Stats populates s with database statistics. +func (db *DB) Stats(s *DBStats) error { + err := db.ok() + if err != nil { + return err + } + + s.IORead = db.s.stor.reads() + s.IOWrite = db.s.stor.writes() + s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN) + s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1 + + s.OpenedTablesCount = db.s.tops.cache.Size() + if db.s.tops.bcache != nil { + s.BlockCacheSize = db.s.tops.bcache.Size() + } else { + s.BlockCacheSize = 0 + } + + s.AliveIterators = atomic.LoadInt32(&db.aliveIters) + s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps) + + s.LevelDurations = s.LevelDurations[:0] + s.LevelRead = s.LevelRead[:0] + s.LevelWrite = s.LevelWrite[:0] + s.LevelSizes = s.LevelSizes[:0] + s.LevelTablesCounts = s.LevelTablesCounts[:0] + + v := db.s.version() + defer v.release() + + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) + if len(tables) == 0 && duration == 0 { + continue + } + s.LevelDurations = append(s.LevelDurations, duration) + s.LevelRead = append(s.LevelRead, read) + s.LevelWrite = append(s.LevelWrite, write) + s.LevelSizes = append(s.LevelSizes, tables.size()) + s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables)) + } + + return nil +} + // SizeOf calculates approximate sizes of the given key ranges. // The length of the returned sizes are equal with the length of the given // ranges. The returned sizes measure storage space usage, so if the user diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go index b6563e87..0c1b9a53 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -640,6 +640,16 @@ func (db *DB) tableNeedCompaction() bool { return v.needCompaction() } +// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted. 
+func (db *DB) resumeWrite() bool { + v := db.s.version() + defer v.release() + if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { + return true + } + return false +} + func (db *DB) pauseCompaction(ch chan<- struct{}) { select { case ch <- struct{}{}: @@ -653,6 +663,7 @@ type cCmd interface { } type cAuto struct { + // Note for table compaction, an non-empty ackC represents it's a compaction waiting command. ackC chan<- error } @@ -765,8 +776,10 @@ func (db *DB) mCompaction() { } func (db *DB) tCompaction() { - var x cCmd - var ackQ []cCmd + var ( + x cCmd + waitQ []cCmd + ) defer func() { if x := recover(); x != nil { @@ -774,9 +787,9 @@ func (db *DB) tCompaction() { panic(x) } } - for i := range ackQ { - ackQ[i].ack(ErrClosed) - ackQ[i] = nil + for i := range waitQ { + waitQ[i].ack(ErrClosed) + waitQ[i] = nil } if x != nil { x.ack(ErrClosed) @@ -795,12 +808,20 @@ func (db *DB) tCompaction() { return default: } + // Resume write operation as soon as possible. + if len(waitQ) > 0 && db.resumeWrite() { + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + } } else { - for i := range ackQ { - ackQ[i].ack(nil) - ackQ[i] = nil + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil } - ackQ = ackQ[:0] + waitQ = waitQ[:0] select { case x = <-db.tcompCmdC: case ch := <-db.tcompPauseC: @@ -813,7 +834,14 @@ func (db *DB) tCompaction() { if x != nil { switch cmd := x.(type) { case cAuto: - ackQ = append(ackQ, x) + if cmd.ackC != nil { + // Check the write pause state before caching it. + if db.resumeWrite() { + x.ack(nil) + } else { + waitQ = append(waitQ, x) + } + } case cRange: x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) default: diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go index 2c69d2e5..c2ad70c8 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go @@ -142,6 +142,10 @@ func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) // DB. And a nil Range.Limit is treated as a key after all keys in // the DB. // +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Value() methods), its content should not be +// modified unless noted otherwise. +// // The iterator must be released after use, by calling Release method. // Releasing the snapshot doesn't mean releasing the iterator too, the // iterator would be still valid until released. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_test.go deleted file mode 100644 index 602376b0..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_test.go +++ /dev/null @@ -1,2925 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "container/list" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" - - "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func tkey(i int) []byte { - return []byte(fmt.Sprintf("%016d", i)) -} - -func tval(seed, n int) []byte { - r := rand.New(rand.NewSource(int64(seed))) - return randomString(r, n) -} - -func testingLogger(t *testing.T) func(log string) { - return func(log string) { - t.Log(log) - } -} - -func testingPreserveOnFailed(t *testing.T) func() (preserve bool, err error) { - return func() (preserve bool, err error) { - preserve = t.Failed() - return - } -} - -type dbHarness struct { - t *testing.T - - stor *testutil.Storage - db *DB - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { - h := new(dbHarness) - h.init(t, o) - return h -} - -func newDbHarness(t *testing.T) *dbHarness { - return newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true}) -} - -func (h *dbHarness) init(t *testing.T, o *opt.Options) { - gomega.RegisterTestingT(t) - h.t = t - h.stor = testutil.NewStorage() - h.stor.OnLog(testingLogger(t)) - h.stor.OnClose(testingPreserveOnFailed(t)) - h.o = o - h.ro = nil - h.wo = nil - - if err := h.openDB0(); err != nil { - // So that it will come after fatal message. 
- defer h.stor.Close() - h.t.Fatal("Open (init): got error: ", err) - } -} - -func (h *dbHarness) openDB0() (err error) { - h.t.Log("opening DB") - h.db, err = Open(h.stor, h.o) - return -} - -func (h *dbHarness) openDB() { - if err := h.openDB0(); err != nil { - h.t.Fatal("Open: got error: ", err) - } -} - -func (h *dbHarness) closeDB0() error { - h.t.Log("closing DB") - return h.db.Close() -} - -func (h *dbHarness) closeDB() { - if h.db != nil { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) - } - } - h.stor.CloseCheck() - runtime.GC() -} - -func (h *dbHarness) reopenDB() { - if h.db != nil { - h.closeDB() - } - h.openDB() -} - -func (h *dbHarness) close() { - if h.db != nil { - h.closeDB0() - h.db = nil - } - h.stor.Close() - h.stor = nil - runtime.GC() -} - -func (h *dbHarness) openAssert(want bool) { - db, err := Open(h.stor, h.o) - if err != nil { - if want { - h.t.Error("Open: assert: got error: ", err) - } else { - h.t.Log("Open: assert: got error (expected): ", err) - } - } else { - if !want { - h.t.Error("Open: assert: expect error") - } - db.Close() - } -} - -func (h *dbHarness) write(batch *Batch) { - if err := h.db.Write(batch, h.wo); err != nil { - h.t.Error("Write: got error: ", err) - } -} - -func (h *dbHarness) put(key, value string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { - h.t.Error("Put: got error: ", err) - } -} - -func (h *dbHarness) putMulti(n int, low, hi string) { - for i := 0; i < n; i++ { - h.put(low, "begin") - h.put(hi, "end") - h.compactMem() - } -} - -func (h *dbHarness) maxNextLevelOverlappingBytes(want int64) { - t := h.t - db := h.db - - var ( - maxOverlaps int64 - maxLevel int - ) - v := db.s.version() - if len(v.levels) > 2 { - for i, tt := range v.levels[1 : len(v.levels)-1] { - level := i + 1 - next := v.levels[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > maxOverlaps { - maxOverlaps = sum - maxLevel = level - } - } - } - } - v.release() - - if maxOverlaps > want { - t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel) - } else { - t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want) - } -} - -func (h *dbHarness) delete(key string) { - t := h.t - db := h.db - - err := db.Delete([]byte(key), h.wo) - if err != nil { - t.Error("Delete: got error: ", err) - } -} - -func (h *dbHarness) assertNumKeys(want int) { - iter := h.db.NewIterator(nil, h.ro) - defer iter.Release() - got := 0 - for iter.Next() { - got++ - } - if err := iter.Error(); err != nil { - h.t.Error("assertNumKeys: ", err) - } - if want != got { - h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) - } -} - -func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { - t := h.t - v, err := db.Get([]byte(key), h.ro) - switch err { - case ErrNotFound: - if expectFound { - t.Errorf("Get: key '%s' not found, want found", key) - } - case nil: - found = true - if !expectFound { - t.Errorf("Get: key '%s' found, want not found", key) - } - default: - t.Error("Get: got error: ", err) - } - return -} - -func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { - return h.getr(h.db, key, expectFound) -} - -func (h *dbHarness) getValr(db Reader, key, value string) { - t := h.t - found, r := h.getr(db, key, true) - if !found { - return - } - rval := string(r) - if rval != value { - t.Errorf("Get: 
invalid value, got '%s', want '%s'", rval, value) - } -} - -func (h *dbHarness) getVal(key, value string) { - h.getValr(h.db, key, value) -} - -func (h *dbHarness) allEntriesFor(key, want string) { - t := h.t - db := h.db - s := db.s - - ikey := makeInternalKey(nil, []byte(key), keyMaxSeq, keyTypeVal) - iter := db.newRawIterator(nil, nil, nil, nil) - if !iter.Seek(ikey) && iter.Error() != nil { - t.Error("AllEntries: error during seek, err: ", iter.Error()) - return - } - res := "[ " - first := true - for iter.Valid() { - if ukey, _, kt, kerr := parseInternalKey(iter.Key()); kerr == nil { - if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { - break - } - if !first { - res += ", " - } - first = false - switch kt { - case keyTypeVal: - res += string(iter.Value()) - case keyTypeDel: - res += "DEL" - } - } else { - if !first { - res += ", " - } - first = false - res += "CORRUPTED" - } - iter.Next() - } - if !first { - res += " " - } - res += "]" - if res != want { - t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) - } -} - -// Return a string that contains all key,value pairs in order, -// formatted like "(k1->v1)(k2->v2)". -func (h *dbHarness) getKeyVal(want string) { - t := h.t - db := h.db - - s, err := db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - res := "" - iter := s.NewIterator(nil, nil) - for iter.Next() { - res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) - } - iter.Release() - - if res != want { - t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) - } - s.Release() -} - -func (h *dbHarness) waitCompaction() { - t := h.t - db := h.db - if err := db.compTriggerWait(db.tcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) waitMemCompaction() { - t := h.t - db := h.db - - if err := db.compTriggerWait(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) compactMem() { - t := h.t - db := h.db - - t.Log("starting memdb compaction") - - db.writeLockC <- struct{}{} - defer func() { - <-db.writeLockC - }() - - if _, err := db.rotateMem(0, true); err != nil { - t.Error("compaction error: ", err) - } - - if h.totalTables() == 0 { - t.Error("zero tables after mem compaction") - } - - t.Log("memdb compaction done") -} - -func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { - t := h.t - db := h.db - - var _min, _max []byte - if min != "" { - _min = []byte(min) - } - if max != "" { - _max = []byte(max) - } - - t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max) - - if err := db.compTriggerRange(db.tcompCmdC, level, _min, _max); err != nil { - if wanterr { - t.Log("CompactRangeAt: got error (expected): ", err) - } else { - t.Error("CompactRangeAt: got error: ", err) - } - } else if wanterr { - t.Error("CompactRangeAt: expect error") - } - - t.Log("table range compaction done") -} - -func (h *dbHarness) compactRangeAt(level int, min, max string) { - h.compactRangeAtErr(level, min, max, false) -} - -func (h *dbHarness) compactRange(min, max string) { - t := h.t - db := h.db - - t.Logf("starting DB range compaction: min=%q, max=%q", min, max) - - var r util.Range - if min != "" { - r.Start = []byte(min) - } - if max != "" { - r.Limit = []byte(max) - } - if err := db.CompactRange(r); err != nil { - t.Error("CompactRange: got error: ", err) - } - - t.Log("DB range compaction done") -} - -func (h *dbHarness) sizeOf(start, limit string) int64 { - sz, err := 
h.db.SizeOf([]util.Range{ - {[]byte(start), []byte(limit)}, - }) - if err != nil { - h.t.Error("SizeOf: got error: ", err) - } - return sz.Sum() -} - -func (h *dbHarness) sizeAssert(start, limit string, low, hi int64) { - sz := h.sizeOf(start, limit) - if sz < low || sz > hi { - h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d", - shorten(start), shorten(limit), low, hi, sz) - } -} - -func (h *dbHarness) getSnapshot() (s *Snapshot) { - s, err := h.db.GetSnapshot() - if err != nil { - h.t.Fatal("GetSnapshot: got error: ", err) - } - return -} - -func (h *dbHarness) getTablesPerLevel() string { - res := "" - nz := 0 - v := h.db.s.version() - for level, tables := range v.levels { - if level > 0 { - res += "," - } - res += fmt.Sprint(len(tables)) - if len(tables) > 0 { - nz = len(res) - } - } - v.release() - return res[:nz] -} - -func (h *dbHarness) tablesPerLevel(want string) { - res := h.getTablesPerLevel() - if res != want { - h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) - } -} - -func (h *dbHarness) totalTables() (n int) { - v := h.db.s.version() - for _, tables := range v.levels { - n += len(tables) - } - v.release() - return -} - -type keyValue interface { - Key() []byte - Value() []byte -} - -func testKeyVal(t *testing.T, kv keyValue, want string) { - res := string(kv.Key()) + "->" + string(kv.Value()) - if res != want { - t.Errorf("invalid key/value, want=%q, got=%q", want, res) - } -} - -func numKey(num int) string { - return fmt.Sprintf("key%06d", num) -} - -var testingBloomFilter = filter.NewBloomFilter(10) - -func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { - for i := 0; i < 4; i++ { - func() { - switch i { - case 0: - case 1: - if o == nil { - o = &opt.Options{ - DisableLargeBatchTransaction: true, - Filter: testingBloomFilter, - } - } else { - old := o - o = &opt.Options{} - *o = *old - o.Filter = testingBloomFilter - } - case 2: - if o == nil { - o = &opt.Options{ - DisableLargeBatchTransaction: true, - Compression: opt.NoCompression, - } - } else { - old := o - o = &opt.Options{} - *o = *old - o.Compression = opt.NoCompression - } - } - h := newDbHarnessWopt(t, o) - defer h.close() - switch i { - case 3: - h.reopenDB() - } - f(h) - }() - } -} - -func trun(t *testing.T, f func(h *dbHarness)) { - truno(t, nil, f) -} - -func testAligned(t *testing.T, name string, offset uintptr) { - if offset%8 != 0 { - t.Errorf("field %s offset is not 64-bit aligned", name) - } -} - -func Test_FieldsAligned(t *testing.T) { - p1 := new(DB) - testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) - p2 := new(session) - testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum)) - testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) - testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) - testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum)) -} - -func TestDB_Locking(t *testing.T) { - h := newDbHarness(t) - defer h.stor.Close() - h.openAssert(false) - h.closeDB() - h.openAssert(true) -} - -func TestDB_Empty(t *testing.T) { - trun(t, func(h *dbHarness) { - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_ReadWrite(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("bar", "v2") - h.put("foo", "v3") - h.getVal("foo", "v3") - h.getVal("bar", "v2") - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "v2") - }) -} - -func TestDB_PutDeleteGet(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", 
"v1") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.getVal("foo", "v2") - h.delete("foo") - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_EmptyBatch(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.get("foo", false) - err := h.db.Write(new(Batch), h.wo) - if err != nil { - t.Error("writing empty batch yield error: ", err) - } - h.get("foo", false) -} - -func TestDB_GetFromFrozen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 100100, - }) - defer h.close() - - h.put("foo", "v1") - h.getVal("foo", "v1") - - h.stor.Stall(testutil.ModeSync, storage.TypeTable) // Block sync calls - h.put("k1", strings.Repeat("x", 100000)) // Fill memtable - h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction - for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { - time.Sleep(10 * time.Microsecond) - } - if h.db.getFrozenMem() == nil { - h.stor.Release(testutil.ModeSync, storage.TypeTable) - t.Fatal("No frozen mem") - } - h.getVal("foo", "v1") - h.stor.Release(testutil.ModeSync, storage.TypeTable) // Release sync calls - - h.reopenDB() - h.getVal("foo", "v1") - h.get("k1", true) - h.get("k2", true) -} - -func TestDB_GetFromTable(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.getVal("foo", "v1") - }) -} - -func TestDB_GetSnapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - bar := strings.Repeat("b", 200) - h.put("foo", "v1") - h.put(bar, "v1") - - snap, err := h.db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.put("foo", "v2") - h.put(bar, "v2") - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - h.compactMem() - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - snap.Release() - - h.reopenDB() - h.getVal("foo", "v2") - h.getVal(bar, "v2") - }) -} - -func TestDB_GetLevel0Ordering(t *testing.T) { - trun(t, func(h *dbHarness) { - h.db.memdbMaxLevel = 2 - - for i := 0; i < 4; i++ { - h.put("bar", fmt.Sprintf("b%d", i)) - h.put("foo", fmt.Sprintf("v%d", i)) - h.compactMem() - } - h.getVal("foo", "v3") - h.getVal("bar", "b3") - - v := h.db.s.version() - t0len := v.tLen(0) - v.release() - if t0len < 2 { - t.Errorf("level-0 tables is less than 2, got %d", t0len) - } - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "b3") - }) -} - -func TestDB_GetOrderedByLevels(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.compactRange("a", "z") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.compactMem() - h.getVal("foo", "v2") - }) -} - -func TestDB_GetPicksCorrectFile(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange to have multiple files in a non-level-0 level. - h.put("a", "va") - h.compactMem() - h.compactRange("a", "b") - h.put("x", "vx") - h.compactMem() - h.compactRange("x", "y") - h.put("f", "vf") - h.compactMem() - h.compactRange("f", "g") - - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - - h.compactRange("", "") - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - }) -} - -func TestDB_GetEncountersEmptyLevel(t *testing.T) { - trun(t, func(h *dbHarness) { - h.db.memdbMaxLevel = 2 - - // Arrange for the following to happen: - // * sstable A in level 0 - // * nothing in level 1 - // * sstable B in level 2 - // Then do enough Get() calls to arrange for an automatic compaction - // of sstable A. 
A bug would cause the compaction to be marked as - // occuring at level 1 (instead of the correct level 0). - - // Step 1: First place sstables in levels 0 and 2 - for i := 0; ; i++ { - if i >= 100 { - t.Fatal("could not fill levels-0 and level-2") - } - v := h.db.s.version() - if v.tLen(0) > 0 && v.tLen(2) > 0 { - v.release() - break - } - v.release() - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - - h.getVal("a", "begin") - h.getVal("z", "end") - } - - // Step 2: clear level 1 if necessary. - h.compactRangeAt(1, "", "") - h.tablesPerLevel("1,0,1") - - h.getVal("a", "begin") - h.getVal("z", "end") - - // Step 3: read a bunch of times - for i := 0; i < 200; i++ { - h.get("missing", false) - } - - // Step 4: Wait for compaction to finish - h.waitCompaction() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - h.getVal("a", "begin") - h.getVal("z", "end") - }) -} - -func TestDB_IterMultiWithDelete(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("a", "va") - h.put("b", "vb") - h.put("c", "vc") - h.delete("b") - h.get("b", false) - - iter := h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - - h.compactMem() - - iter = h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - }) -} - -func TestDB_IteratorPinsRef(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "hello") - - // Get iterator that will yield the current contents of the DB. - iter := h.db.NewIterator(nil, nil) - - // Write to force compactions - h.put("foo", "newvalue1") - for i := 0; i < 100; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - h.put("foo", "newvalue2") - - iter.First() - testKeyVal(t, iter, "foo->hello") - if iter.Next() { - t.Errorf("expect eof") - } - iter.Release() -} - -func TestDB_Recover(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("baz", "v5") - - h.reopenDB() - h.getVal("foo", "v1") - - h.getVal("foo", "v1") - h.getVal("baz", "v5") - h.put("bar", "v2") - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - h.put("foo", "v4") - h.getVal("foo", "v4") - h.getVal("bar", "v2") - h.getVal("baz", "v5") - }) -} - -func TestDB_RecoverWithEmptyJournal(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("foo", "v2") - - h.reopenDB() - h.reopenDB() - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - }) -} - -func TestDB_RecoverDuringMemtableCompaction(t *testing.T) { - truno(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 1000000}, func(h *dbHarness) { - - h.stor.Stall(testutil.ModeSync, storage.TypeTable) - h.put("big1", strings.Repeat("x", 10000000)) - h.put("big2", strings.Repeat("y", 1000)) - h.put("bar", "v2") - h.stor.Release(testutil.ModeSync, storage.TypeTable) - - h.reopenDB() - h.getVal("bar", "v2") - h.getVal("big1", strings.Repeat("x", 10000000)) - h.getVal("big2", strings.Repeat("y", 1000)) - }) -} - -func TestDB_MinorCompactionsHappen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 10000}) - defer h.close() - - n := 500 - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - for i := 0; i < n; i++ { - h.put(key(i), key(i)+strings.Repeat("v", 1000)) - } - - for i := 0; i < n; i++ { - h.getVal(key(i), 
key(i)+strings.Repeat("v", 1000)) - } - - h.reopenDB() - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } -} - -func TestDB_RecoverWithLargeJournal(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("big1", strings.Repeat("1", 200000)) - h.put("big2", strings.Repeat("2", 200000)) - h.put("small3", strings.Repeat("3", 10)) - h.put("small4", strings.Repeat("4", 10)) - h.tablesPerLevel("") - - // Make sure that if we re-open with a small write buffer size that - // we flush table files in the middle of a large journal file. - h.o.WriteBuffer = 100000 - h.reopenDB() - h.getVal("big1", strings.Repeat("1", 200000)) - h.getVal("big2", strings.Repeat("2", 200000)) - h.getVal("small3", strings.Repeat("3", 10)) - h.getVal("small4", strings.Repeat("4", 10)) - v := h.db.s.version() - if v.tLen(0) <= 1 { - t.Errorf("tables-0 less than one") - } - v.release() -} - -func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 10000000, - Compression: opt.NoCompression, - }) - defer h.close() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - n := 80 - - // Write 8MB (80 values, each 100K) - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - - // Reopening moves updates to level-0 - h.reopenDB() - h.compactRangeAt(0, "", "") - - v = h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - if v.tLen(1) <= 1 { - t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) - } - v.release() - - for i := 0; i < n; i++ { - h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } -} - -func TestDB_RepeatedWritesToSameKey(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 100000}) - defer h.close() - - maxTables := h.o.GetWriteL0PauseTrigger() + 7 - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 100000, - }) - defer h.close() - - h.reopenDB() - - maxTables := h.o.GetWriteL0PauseTrigger() + 7 - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_SparseMerge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, Compression: opt.NoCompression}) - defer h.close() - - h.putMulti(7, "A", "Z") - - // Suppose there is: - // small amount of data with prefix A - // large amount of data with prefix B - // small amount of data with prefix C - // and that recent updates have made small changes to all three prefixes. - // Check that we do not do a compaction that merges all of B in one shot. 
- h.put("A", "va") - value := strings.Repeat("x", 1000) - for i := 0; i < 100000; i++ { - h.put(fmt.Sprintf("B%010d", i), value) - } - h.put("C", "vc") - h.compactMem() - h.compactRangeAt(0, "", "") - h.waitCompaction() - - // Make sparse update - h.put("A", "va2") - h.put("B100", "bvalue2") - h.put("C", "vc2") - h.compactMem() - - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(0, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(1, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) -} - -func TestDB_SizeOf(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - Compression: opt.NoCompression, - WriteBuffer: 10000000, - }) - defer h.close() - - h.sizeAssert("", "xyz", 0, 0) - h.reopenDB() - h.sizeAssert("", "xyz", 0, 0) - - // Write 8MB (80 values, each 100K) - n := 80 - s1 := 100000 - s2 := 105000 - - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) - } - - // 0 because SizeOf() does not account for memtable space - h.sizeAssert("", numKey(50), 0, 0) - - for r := 0; r < 3; r++ { - h.reopenDB() - - for cs := 0; cs < n; cs += 10 { - for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), int64(s1*i), int64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", int64(s1*(i+1)), int64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), int64(s1*10), int64(s2*10)) - } - - h.sizeAssert("", numKey(50), int64(s1*50), int64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", int64(s1*50), int64(s2*50)) - - h.compactRangeAt(0, numKey(cs), numKey(cs+9)) - } - - v := h.db.s.version() - if v.tLen(0) != 0 { - t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) - } - if v.tLen(1) == 0 { - t.Error("level-1 tables was zero") - } - v.release() - } -} - -func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - Compression: opt.NoCompression, - }) - defer h.close() - - sizes := []int64{ - 10000, - 10000, - 100000, - 10000, - 100000, - 10000, - 300000, - 10000, - } - - for i, n := range sizes { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) - } - - for r := 0; r < 3; r++ { - h.reopenDB() - - var x int64 - for i, n := range sizes { - y := x - if i > 0 { - y += 1000 - } - h.sizeAssert("", numKey(i), x, y) - x += n - } - - h.sizeAssert(numKey(3), numKey(5), 110000, 111000) - - h.compactRangeAt(0, "", "") - } -} - -func TestDB_Snapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - s1 := h.getSnapshot() - h.put("foo", "v2") - s2 := h.getSnapshot() - h.put("foo", "v3") - s3 := h.getSnapshot() - h.put("foo", "v4") - - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getValr(s3, "foo", "v3") - h.getVal("foo", "v4") - - s3.Release() - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s1.Release() - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s2.Release() - h.getVal("foo", "v4") - }) -} - -func TestDB_SnapshotList(t *testing.T) { - db := &DB{snapsList: list.New()} - e0a := db.acquireSnapshot() - e0b := db.acquireSnapshot() - db.seq = 1 - e1 := db.acquireSnapshot() - db.seq = 2 - e2 := db.acquireSnapshot() - - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e0a) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 0 { - 
t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e0b) - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - e2 = db.acquireSnapshot() - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e1) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } -} - -func TestDB_HiddenValuesAreRemoved(t *testing.T) { - trun(t, func(h *dbHarness) { - s := h.db.s - - m := 2 - h.db.memdbMaxLevel = m - - h.put("foo", "v1") - h.compactMem() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.put("foo", "v2") - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactMem() - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactRangeAt(m-2, "", "z") - // DEL eliminated, but v1 remains because we aren't compacting that level - // (DEL can be eliminated because v2 hides v1). - h.allEntriesFor("foo", "[ v2, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). - h.allEntriesFor("foo", "[ v2 ]") - }) -} - -func TestDB_DeletionMarkers2(t *testing.T) { - h := newDbHarness(t) - defer h.close() - s := h.db.s - - m := 2 - h.db.memdbMaxLevel = m - - h.put("foo", "v1") - h.compactMem() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactMem() // Moves to level last-2 - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-2, "", "") - // DEL kept: "last" file overlaps - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
- h.allEntriesFor("foo", "[ ]") -} - -func TestDB_CompactionTableOpenError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - OpenFilesCacheCapacity: -1, - }) - defer h.close() - - h.db.memdbMaxLevel = 2 - - im := 10 - jm := 10 - for r := 0; r < 2; r++ { - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - h.compactMem() - } - } - - if n := h.totalTables(); n != im*2 { - t.Errorf("total tables is %d, want %d", n, im*2) - } - - h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, errors.New("open error during table compaction")) - go h.db.CompactRange(util.Range{}) - if err := h.db.compTriggerWait(h.db.tcompCmdC); err != nil { - t.Log("compaction error: ", err) - } - h.closeDB0() - h.openDB() - h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, nil) - - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - } -} - -func TestDB_OverlapInLevel0(t *testing.T) { - trun(t, func(h *dbHarness) { - h.db.memdbMaxLevel = 2 - - // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. - h.put("100", "v100") - h.put("999", "v999") - h.compactMem() - h.delete("100") - h.delete("999") - h.compactMem() - h.tablesPerLevel("0,1,1") - - // Make files spanning the following ranges in level-0: - // files[0] 200 .. 900 - // files[1] 300 .. 500 - // Note that files are sorted by min key. - h.put("300", "v300") - h.put("500", "v500") - h.compactMem() - h.put("200", "v200") - h.put("600", "v600") - h.put("900", "v900") - h.compactMem() - h.tablesPerLevel("2,1,1") - - // Compact away the placeholder files we created initially - h.compactRangeAt(1, "", "") - h.compactRangeAt(2, "", "") - h.tablesPerLevel("2") - - // Do a memtable compaction. Before bug-fix, the compaction would - // not detect the overlap with level-0 files and would incorrectly place - // the deletion in a deeper level. 
- h.delete("600") - h.compactMem() - h.tablesPerLevel("3") - h.get("600", false) - }) -} - -func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("b", "v") - h.reopenDB() - h.delete("b") - h.delete("a") - h.reopenDB() - h.delete("a") - h.reopenDB() - h.put("a", "v") - h.reopenDB() - h.reopenDB() - h.getKeyVal("(a->v)") - h.waitCompaction() - h.getKeyVal("(a->v)") -} - -func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("e") - h.put("", "") - h.reopenDB() - h.put("c", "cv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.put("", "") - h.waitCompaction() - h.reopenDB() - h.put("d", "dv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("d") - h.delete("b") - h.reopenDB() - h.getKeyVal("(->)(c->cv)") - h.waitCompaction() - h.getKeyVal("(->)(c->cv)") -} - -func TestDB_SingleEntryMemCompaction(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 10; i++ { - h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) - h.compactMem() - h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) - h.compactMem() - h.put("k", "v") - h.compactMem() - h.put("", "") - h.compactMem() - h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) - h.compactMem() - } - }) -} - -func TestDB_ManifestWriteError(t *testing.T) { - for i := 0; i < 2; i++ { - func() { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "bar") - h.getVal("foo", "bar") - - // Mem compaction (will succeed) - h.compactMem() - h.getVal("foo", "bar") - v := h.db.s.version() - if n := v.tLen(0); n != 1 { - t.Errorf("invalid total tables, want=1 got=%d", n) - } - v.release() - - if i == 0 { - h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, errors.New("manifest write error")) - } else { - h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, errors.New("manifest sync error")) - } - - // Merging compaction (will fail) - h.compactRangeAtErr(0, "", "", true) - - h.db.Close() - h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, nil) - h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, nil) - - // Should not lose data - h.openDB() - h.getVal("foo", "bar") - }() - } -} - -func assertErr(t *testing.T, err error, wanterr bool) { - if err != nil { - if wanterr { - t.Log("AssertErr: got error (expected): ", err) - } else { - t.Error("AssertErr: got error: ", err) - } - } else if wanterr { - t.Error("AssertErr: expect error") - } -} - -func TestDB_ClosedIsClosed(t *testing.T) { - h := newDbHarness(t) - db := h.db - - var iter, iter2 iterator.Iterator - var snap *Snapshot - func() { - defer h.close() - - h.put("k", "v") - h.getVal("k", "v") - - iter = db.NewIterator(nil, h.ro) - iter.Seek([]byte("k")) - testKeyVal(t, iter, "k->v") - - var err error - snap, err = db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.getValr(snap, "k", "v") - - iter2 = snap.NewIterator(nil, h.ro) - iter2.Seek([]byte("k")) - testKeyVal(t, iter2, "k->v") - - h.put("foo", "v2") - h.delete("foo") - - // closing DB - iter.Release() - iter2.Release() - }() - - assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) - _, err := db.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - if iter.Valid() { - t.Errorf("iter.Valid should false") - } - assertErr(t, iter.Error(), false) - testKeyVal(t, iter, "->") - if iter.Seek([]byte("k")) { - t.Errorf("iter.Seek should false") - } - assertErr(t, iter.Error(), 
true) - - assertErr(t, iter2.Error(), false) - - _, err = snap.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - _, err = db.GetSnapshot() - assertErr(t, err, true) - - iter3 := db.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - iter3 = snap.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - assertErr(t, db.Delete([]byte("k"), h.wo), true) - - _, err = db.GetProperty("leveldb.stats") - assertErr(t, err, true) - - _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) - assertErr(t, err, true) - - assertErr(t, db.CompactRange(util.Range{}), true) - - assertErr(t, db.Close(), true) -} - -type numberComparer struct{} - -func (numberComparer) num(x []byte) (n int) { - fmt.Sscan(string(x[1:len(x)-1]), &n) - return -} - -func (numberComparer) Name() string { - return "test.NumberComparer" -} - -func (p numberComparer) Compare(a, b []byte) int { - return p.num(a) - p.num(b) -} - -func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } -func (numberComparer) Successor(dst, b []byte) []byte { return nil } - -func TestDB_CustomComparer(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - Comparer: numberComparer{}, - WriteBuffer: 1000, - }) - defer h.close() - - h.put("[10]", "ten") - h.put("[0x14]", "twenty") - for i := 0; i < 2; i++ { - h.getVal("[10]", "ten") - h.getVal("[0xa]", "ten") - h.getVal("[20]", "twenty") - h.getVal("[0x14]", "twenty") - h.get("[15]", false) - h.get("[0xf]", false) - h.compactMem() - h.compactRange("[0]", "[9999]") - } - - for n := 0; n < 2; n++ { - for i := 0; i < 100; i++ { - v := fmt.Sprintf("[%d]", i*10) - h.put(v, v) - } - h.compactMem() - h.compactRange("[0]", "[1000000]") - } -} - -func TestDB_ManualCompaction(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.db.memdbMaxLevel = 2 - - h.putMulti(3, "p", "q") - h.tablesPerLevel("1,1,1") - - // Compaction range falls before files - h.compactRange("", "c") - h.tablesPerLevel("1,1,1") - - // Compaction range falls after files - h.compactRange("r", "z") - h.tablesPerLevel("1,1,1") - - // Compaction range overlaps files - h.compactRange("p1", "p9") - h.tablesPerLevel("0,0,1") - - // Populate a different range - h.putMulti(3, "c", "e") - h.tablesPerLevel("1,1,2") - - // Compact just the new range - h.compactRange("b", "f") - h.tablesPerLevel("0,0,2") - - // Compact all - h.putMulti(1, "a", "z") - h.tablesPerLevel("0,1,2") - h.compactRange("", "") - h.tablesPerLevel("0,0,1") -} - -func TestDB_BloomFilter(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - DisableBlockCache: true, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - const n = 10000 - - // Populate multiple layers - for i := 0; i < n; i++ { - h.put(key(i), key(i)) - } - h.compactMem() - h.compactRange("a", "z") - for i := 0; i < n; i += 100 { - h.put(key(i), key(i)) - } - h.compactMem() - - // Prevent auto compactions triggered by seeks - h.stor.Stall(testutil.ModeSync, storage.TypeTable) - - // Lookup present keys. Should rarely read from small sstable. 
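The Bloom-filter test above counts sstable reads through the internal test harness; as a rough usage sketch against the public goleveldb API (the database path and keys below are made up for illustration), the same 10-bits-per-key filter is configured through opt.Options:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Open a database with a ~10-bits-per-key Bloom filter, mirroring the
	// Filter option used by the test harness above.
	db, err := leveldb.OpenFile("bloom-example-db", &opt.Options{
		Filter: filter.NewBloomFilter(10),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("key000001"), []byte("v"), nil); err != nil {
		log.Fatal(err)
	}
	// Lookups of absent keys are normally rejected by the per-table filter
	// block, so they rarely cost a data-block read.
	if _, err := db.Get([]byte("key000001.missing"), nil); err == leveldb.ErrNotFound {
		log.Print("missing key rejected by the filter path")
	}
}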
- h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable) - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)) - } - cnt, _ := h.stor.Counter(testutil.ModeRead, storage.TypeTable) - t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - if min, max := n, n+2*n/100; cnt < min || cnt > max { - t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) - } - - // Lookup missing keys. Should rarely read from either sstable. - h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable) - for i := 0; i < n; i++ { - h.get(key(i)+".missing", false) - } - cnt, _ = h.stor.Counter(testutil.ModeRead, storage.TypeTable) - t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) - if max := 3 * n / 100; cnt > max { - t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) - } - - h.stor.Release(testutil.ModeSync, storage.TypeTable) -} - -func TestDB_Concurrent(t *testing.T) { - const n, secs, maxkey = 4, 6, 1000 - h := newDbHarness(t) - defer h.close() - - runtime.GOMAXPROCS(runtime.NumCPU()) - - var ( - closeWg sync.WaitGroup - stop uint32 - cnt [n]uint32 - ) - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - var put, get, found uint - defer func() { - t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", - i, cnt[i], put, get, found, get-found) - closeWg.Done() - }() - - rnd := rand.New(rand.NewSource(int64(1000 + i))) - for atomic.LoadUint32(&stop) == 0 { - x := cnt[i] - - k := rnd.Intn(maxkey) - kstr := fmt.Sprintf("%016d", k) - - if (rnd.Int() % 2) > 0 { - put++ - h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) - } else { - get++ - v, err := h.db.Get([]byte(kstr), h.ro) - if err == nil { - found++ - rk, ri, rx := 0, -1, uint32(0) - fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) - if rk != k { - t.Errorf("invalid key want=%d got=%d", k, rk) - } - if ri < 0 || ri >= n { - t.Error("invalid goroutine number: ", ri) - } else { - tx := atomic.LoadUint32(&(cnt[ri])) - if rx > tx { - t.Errorf("invalid seq number, %d > %d ", rx, tx) - } - } - } else if err != ErrNotFound { - t.Error("Get: got error: ", err) - return - } - } - atomic.AddUint32(&cnt[i], 1) - } - }(i) - } - - time.Sleep(secs * time.Second) - atomic.StoreUint32(&stop, 1) - closeWg.Wait() -} - -func TestDB_ConcurrentIterator(t *testing.T) { - const n, n2 = 4, 1000 - h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 30}) - defer h.close() - - runtime.GOMAXPROCS(runtime.NumCPU()) - - var ( - closeWg sync.WaitGroup - stop uint32 - ) - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 0; atomic.LoadUint32(&stop) == 0; k++ { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - cmp := comparer.DefaultComparer - for i := 0; i < n2; i++ { - closeWg.Add(1) - go func(i int) { - it := h.db.NewIterator(nil, nil) - var pk []byte - for it.Next() { - kk := it.Key() - if cmp.Compare(kk, pk) <= 0 { - t.Errorf("iter %d: %q is successor of %q", i, pk, kk) - } - pk = append(pk[:0], kk...) 
- var k, vk, vi int - if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { - t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) - } else if n < 1 { - t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) - } - if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { - t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) - } else if n < 2 { - t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) - } - - if vk != k { - t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) - } - } - if err := it.Error(); err != nil { - t.Errorf("iter %d: Got error: %v", i, err) - } - it.Release() - closeWg.Done() - }(i) - } - - atomic.StoreUint32(&stop, 1) - closeWg.Wait() -} - -func TestDB_ConcurrentWrite(t *testing.T) { - const n, bk, niter = 10, 3, 10000 - h := newDbHarness(t) - defer h.close() - - runtime.GOMAXPROCS(runtime.NumCPU()) - - var wg sync.WaitGroup - for i := 0; i < n; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - for k := 0; k < niter; k++ { - kstr := fmt.Sprintf("put-%d.%d", i, k) - vstr := fmt.Sprintf("v%d", k) - h.put(kstr, vstr) - // Key should immediately available after put returns. - h.getVal(kstr, vstr) - } - }(i) - } - for i := 0; i < n; i++ { - wg.Add(1) - batch := &Batch{} - go func(i int) { - defer wg.Done() - for k := 0; k < niter; k++ { - batch.Reset() - for j := 0; j < bk; j++ { - batch.Put([]byte(fmt.Sprintf("batch-%d.%d.%d", i, k, j)), []byte(fmt.Sprintf("v%d", k))) - } - h.write(batch) - // Key should immediately available after put returns. - for j := 0; j < bk; j++ { - h.getVal(fmt.Sprintf("batch-%d.%d.%d", i, k, j), fmt.Sprintf("v%d", k)) - } - } - }(i) - } - wg.Wait() -} - -func TestDB_CreateReopenDbOnFile(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - stor, err := storage.OpenFile(dbpath, false) - if err != nil { - t.Fatalf("(%d) cannot open storage: %s", i, err) - } - db, err := Open(stor, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - if err := stor.Close(); err != nil { - t.Fatalf("(%d) cannot close storage: %s", i, err) - } - } -} - -func TestDB_CreateReopenDbOnFile2(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - db, err := OpenFile(dbpath, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - } -} - -func TestDB_DeletionMarkersOnMemdb(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.compactMem() - h.delete("foo") - h.get("foo", false) - h.getKeyVal("") -} - -func TestDB_LeveldbIssue178(t *testing.T) { - nKeys := (opt.DefaultCompactionTableSize / 30) * 5 - key1 := func(i int) string { - return fmt.Sprintf("my_key_%d", i) - } - key2 
:= func(i int) string { - return fmt.Sprintf("my_key_%d_xxx", i) - } - - // Disable compression since it affects the creation of layers and the - // code below is trying to test against a very specific scenario. - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - Compression: opt.NoCompression, - }) - defer h.close() - - // Create first key range. - batch := new(Batch) - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key1(i)), []byte("value for range 1 key")) - } - h.write(batch) - - // Create second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key2(i)), []byte("value for range 2 key")) - } - h.write(batch) - - // Delete second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Delete([]byte(key2(i))) - } - h.write(batch) - h.waitMemCompaction() - - // Run manual compaction. - h.compactRange(key1(0), key1(nKeys-1)) - - // Checking the keys. - h.assertNumKeys(nKeys) -} - -func TestDB_LeveldbIssue200(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("1", "b") - h.put("2", "c") - h.put("3", "d") - h.put("4", "e") - h.put("5", "f") - - iter := h.db.NewIterator(nil, h.ro) - - // Add an element that should not be reflected in the iterator. - h.put("25", "cd") - - iter.Seek([]byte("5")) - assertBytes(t, []byte("5"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("4"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("3"), iter.Key()) - iter.Next() - assertBytes(t, []byte("4"), iter.Key()) - iter.Next() - assertBytes(t, []byte("5"), iter.Key()) -} - -func TestDB_GoleveldbIssue74(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 1 * opt.MiB, - }) - defer h.close() - - const n, dur = 10000, 5 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(2) - var done uint32 - go func() { - var i int - defer func() { - t.Logf("WRITER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - - b := new(Batch) - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iv := fmt.Sprintf("VAL%010d", i) - for k := 0; k < n; k++ { - key := fmt.Sprintf("KEY%06d", k) - b.Put([]byte(key), []byte(key+iv)) - b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key)) - } - h.write(b) - - b.Reset() - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err) - } else if string(value) != string(key)+iv { - t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value) - } - - b.Delete(key) - b.Delete(ptrKey) - } - h.write(b) - iter.Release() - snap.Release() - if k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var prevValue string - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", 
i, ptrKey, err) - } - - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err) - } else if prevValue != "" && string(value) != string(key)+prevValue { - t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value) - } else { - prevValue = string(value[len(key):]) - } - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - wg.Wait() -} - -func TestDB_GetProperties(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - _, err := h.db.GetProperty("leveldb.num-files-at-level") - if err == nil { - t.Error("GetProperty() failed to detect missing level") - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0") - if err != nil { - t.Error("got unexpected error", err) - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0x") - if err == nil { - t.Error("GetProperty() failed to detect invalid level") - } -} - -func TestDB_GoleveldbIssue72and83(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 1 * opt.MiB, - OpenFilesCacheCapacity: 3, - }) - defer h.close() - - const n, wn, dur = 10000, 100, 30 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - randomData := func(prefix byte, i int) []byte { - data := make([]byte, 1+4+32+64+32) - _, err := crand.Reader.Read(data[1 : len(data)-8]) - if err != nil { - panic(err) - } - data[0] = prefix - binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i)) - binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value()) - return data - } - - keys := make([][]byte, n) - for i := range keys { - keys[i] = randomData(1, 0) - } - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(3) - var done uint32 - go func() { - i := 0 - defer func() { - t.Logf("WRITER DONE #%d", i) - wg.Done() - }() - - b := new(Batch) - for ; i < wn && atomic.LoadUint32(&done) == 0; i++ { - b.Reset() - for _, k1 := range keys { - k2 := randomData(2, i) - b.Put(k2, randomData(42, i)) - b.Put(k1, k2) - } - if err := h.db.Write(b, h.wo); err != nil { - atomic.StoreUint32(&done, 1) - t.Fatalf("WRITER #%d db.Write: %v", i, err) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER0 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - seq := snap.elem.seq - if seq == 0 { - snap.Release() - continue - } - iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) - writei := int(seq/(n*2) - 1) - var k int - for ; iter.Next(); k++ { - k1 := iter.Key() - k2 := iter.Value() - k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:]) - k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value() - if k1checksum0 != k1checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0) - } - k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:]) - k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value() - if k2checksum0 != k2checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1) - } - kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:])) - if writei != kwritei { - t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei) - } - if _, err := snap.Get(k2, nil); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2) - } - } - 
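TestDB_GetProperties above expects "leveldb.num-files-at-level" to fail unless a concrete level number is appended; a minimal sketch of the same calls through the public API, using the in-memory storage helper purely to keep the example self-contained:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Valid: a concrete level number is appended to the property prefix.
	v, err := db.GetProperty("leveldb.num-files-at-level0")
	fmt.Println(v, err)

	// Invalid: the level number is missing, so an error is expected.
	if _, err := db.GetProperty("leveldb.num-files-at-level"); err != nil {
		fmt.Println("expected error:", err)
	}
}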
if err := iter.Error(); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err) - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER1 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iter := h.db.NewIterator(nil, nil) - seq := iter.(*dbIter).seq - if seq == 0 { - iter.Release() - continue - } - writei := int(seq/(n*2) - 1) - var k int - for ok := iter.Last(); ok; ok = iter.Prev() { - k++ - } - if err := iter.Error(); err != nil { - t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err) - } - iter.Release() - if m := (writei+1)*n + n; k != m { - t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m) - } - } - }() - - wg.Wait() -} - -func TestDB_TransientError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 128 * opt.KiB, - OpenFilesCacheCapacity: 3, - DisableCompactionBackoff: true, - }) - defer h.close() - - const ( - nSnap = 20 - nKey = 10000 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, errors.New("table transient read error")) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Delete([]byte(key)) - } - h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, errors.New("table transient read error")) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - } - h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) - - runtime.GOMAXPROCS(runtime.NumCPU()) - - rnd := rand.New(rand.NewSource(0xecafdaed)) - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(2) - - go func(i int, snap *Snapshot, sk []int) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for _, k := range sk { - key := fmt.Sprintf("KEY%8d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap, rnd.Perm(nKey)) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - iter := snap.NewIterator(nil, nil) - defer iter.Release() - for k := 0; k < nKey; k++ { - if !iter.Next() { - if err := iter.Error(); err != nil { - t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err) - } 
else { - t.Fatalf("READER_ITER #%d K%d eoi", i, k) - } - } - key := fmt.Sprintf("KEY%8d", k) - xkey := iter.Key() - if !bytes.Equal([]byte(key), xkey) { - t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey) - } - value := key + vtail - xvalue := iter.Value() - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - CompactionExpandLimitFactor: 1, - }) - defer h.close() - - const ( - nSnap = 190 - nKey = 140 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Delete([]byte(key)) - } - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - } - - h.compactMem() - - h.waitCompaction() - for level, tables := range h.db.s.stVersion.levels { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) - } - } - - h.compactRangeAt(0, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.levels { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) - } - } - h.compactRangeAt(1, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.levels { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) - } - } - runtime.GOMAXPROCS(runtime.NumCPU()) - - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(1) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_TableCompactionBuilder(t *testing.T) { - gomega.RegisterTestingT(t) - stor := testutil.NewStorage() - stor.OnLog(testingLogger(t)) - stor.OnClose(testingPreserveOnFailed(t)) - defer stor.Close() - - const nSeq = 99 - - o := &opt.Options{ - DisableLargeBatchTransaction: true, - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 43 * opt.KiB, - CompactionExpandLimitFactor: 1, - CompactionGPOverlapsFactor: 1, - DisableBlockCache: true, - } - s, err := newSession(stor, o) - if err != nil { - t.Fatal(err) - } - if err := s.create(); err != nil { - t.Fatal(err) - } - defer s.close() - var ( - seq uint64 - targetSize = 5 * o.CompactionTableSize - value = bytes.Repeat([]byte{'0'}, 100) - ) - for i := 0; i < 2; i++ { - tw, err := s.tops.create() - if err != nil { - t.Fatal(err) - } - for k := 0; tw.tw.BytesLen() < targetSize; k++ { - key := []byte(fmt.Sprintf("%09d", k)) - seq += nSeq - 1 - for x := uint64(0); x < nSeq; x++ { - if err := 
tw.append(makeInternalKey(nil, key, seq-x, keyTypeVal), value); err != nil { - t.Fatal(err) - } - } - } - tf, err := tw.finish() - if err != nil { - t.Fatal(err) - } - rec := &sessionRecord{} - rec.addTableFile(i, tf) - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - } - - // Build grandparent. - v := s.version() - c := newCompaction(s, v, 1, append(tFiles{}, v.levels[1]...)) - rec := &sessionRecord{} - b := &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize/3 + 961, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.levels[0] { - rec.delTable(c.sourceLevel, t.fd.Num) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - // Build level-1. - v = s.version() - c = newCompaction(s, v, 0, append(tFiles{}, v.levels[0]...)) - rec = &sessionRecord{} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.levels[0] { - rec.delTable(c.sourceLevel, t.fd.Num) - } - // Move grandparent to level-3 - for _, t := range v.levels[2] { - rec.delTable(2, t.fd.Num) - rec.addTableFile(3, t) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - v = s.version() - for level, want := range []bool{false, true, false, true} { - got := len(v.levels[level]) > 0 - if want != got { - t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got) - } - } - for i, f := range v.levels[1][:len(v.levels[1])-1] { - nf := v.levels[1][i+1] - if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) { - t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.fd.Num, nf.fd.Num) - } - } - v.release() - - // Compaction with transient error. 
- v = s.version() - c = newCompaction(s, v, 1, append(tFiles{}, v.levels[1]...)) - rec = &sessionRecord{} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - stor.EmulateErrorOnce(testutil.ModeSync, storage.TypeTable, errors.New("table sync error (once)")) - stor.EmulateRandomError(testutil.ModeRead|testutil.ModeWrite, storage.TypeTable, 0.01, errors.New("table random IO error")) - for { - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Logf("(expected) b.run: %v", err) - } else { - break - } - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - stor.EmulateErrorOnce(testutil.ModeSync, storage.TypeTable, nil) - stor.EmulateRandomError(testutil.ModeRead|testutil.ModeWrite, storage.TypeTable, 0, nil) - - v = s.version() - if len(v.levels[1]) != len(v.levels[2]) { - t.Fatalf("invalid tables length, want %d, got %d", len(v.levels[1]), len(v.levels[2])) - } - for i, f0 := range v.levels[1] { - f1 := v.levels[2][i] - iter0 := s.tops.newIterator(f0, nil, nil) - iter1 := s.tops.newIterator(f1, nil, nil) - for j := 0; true; j++ { - next0 := iter0.Next() - next1 := iter1.Next() - if next0 != next1 { - t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1) - } - key0 := iter0.Key() - key1 := iter1.Key() - if !bytes.Equal(key0, key1) { - t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1) - } - if next0 == false { - break - } - } - iter0.Release() - iter1.Release() - } - v.release() -} - -func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) { - const ( - vSize = 200 * opt.KiB - tSize = 100 * opt.MiB - mIter = 100 - n = tSize / vSize - ) - - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - Compression: opt.NoCompression, - DisableBlockCache: true, - }) - defer h.close() - - h.db.memdbMaxLevel = 2 - - key := func(x int) string { - return fmt.Sprintf("v%06d", x) - } - - // Fill. - value := strings.Repeat("x", vSize) - for i := 0; i < n; i++ { - h.put(key(i), value) - } - h.compactMem() - - // Delete all. - for i := 0; i < n; i++ { - h.delete(key(i)) - } - h.compactMem() - - var ( - limit = n / limitDiv - - startKey = key(0) - limitKey = key(limit) - maxKey = key(n) - slice = &util.Range{Limit: []byte(limitKey)} - - initialSize0 = h.sizeOf(startKey, limitKey) - initialSize1 = h.sizeOf(limitKey, maxKey) - ) - - t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1))) - - for r := 0; true; r++ { - if r >= mIter { - t.Fatal("taking too long to compact") - } - - // Iterates. - iter := h.db.NewIterator(slice, h.ro) - for iter.Next() { - } - if err := iter.Error(); err != nil { - t.Fatalf("Iter err: %v", err) - } - iter.Release() - - // Wait compaction. - h.waitCompaction() - - // Check size. 
- size0 := h.sizeOf(startKey, limitKey) - size1 := h.sizeOf(limitKey, maxKey) - t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1))) - if size0 < initialSize0/10 { - break - } - } - - if initialSize1 > 0 { - h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB) - } -} - -func TestDB_IterTriggeredCompaction(t *testing.T) { - testDB_IterTriggeredCompaction(t, 1) -} - -func TestDB_IterTriggeredCompactionHalf(t *testing.T) { - testDB_IterTriggeredCompaction(t, 2) -} - -func TestDB_ReadOnly(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.put("bar", "v2") - h.compactMem() - - h.put("xfoo", "v1") - h.put("xbar", "v2") - - t.Log("Trigger read-only") - if err := h.db.SetReadOnly(); err != nil { - h.close() - t.Fatalf("SetReadOnly error: %v", err) - } - - mode := testutil.ModeCreate | testutil.ModeRemove | testutil.ModeRename | testutil.ModeWrite | testutil.ModeSync - h.stor.EmulateError(mode, storage.TypeAll, errors.New("read-only DB shouldn't writes")) - - ro := func(key, value, wantValue string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly { - t.Fatalf("unexpected error: %v", err) - } - h.getVal(key, wantValue) - } - - ro("foo", "vx", "v1") - - h.o.ReadOnly = true - h.reopenDB() - - ro("foo", "vx", "v1") - ro("bar", "vx", "v2") - h.assertNumKeys(4) -} - -func TestDB_BulkInsertDelete(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - Compression: opt.NoCompression, - CompactionTableSize: 128 * opt.KiB, - CompactionTotalSize: 1 * opt.MiB, - WriteBuffer: 256 * opt.KiB, - }) - defer h.close() - - const R = 100 - const N = 2500 - key := make([]byte, 4) - value := make([]byte, 256) - for i := 0; i < R; i++ { - offset := N * i - for j := 0; j < N; j++ { - binary.BigEndian.PutUint32(key, uint32(offset+j)) - h.db.Put(key, value, nil) - } - for j := 0; j < N; j++ { - binary.BigEndian.PutUint32(key, uint32(offset+j)) - h.db.Delete(key, nil) - } - } - - if tot := h.totalTables(); tot > 10 { - t.Fatalf("too many uncompacted tables: %d (%s)", tot, h.getTablesPerLevel()) - } -} - -func TestDB_GracefulClose(t *testing.T) { - runtime.GOMAXPROCS(4) - h := newDbHarnessWopt(t, &opt.Options{ - DisableLargeBatchTransaction: true, - Compression: opt.NoCompression, - CompactionTableSize: 1 * opt.MiB, - WriteBuffer: 1 * opt.MiB, - }) - defer h.close() - - var closeWait sync.WaitGroup - - // During write. - n := 0 - closing := false - for i := 0; i < 1000000; i++ { - if !closing && h.totalTables() > 3 { - t.Logf("close db during write, index=%d", i) - closeWait.Add(1) - go func() { - h.closeDB() - closeWait.Done() - }() - closing = true - } - if err := h.db.Put([]byte(fmt.Sprintf("%09d", i)), []byte(fmt.Sprintf("VAL-%09d", i)), h.wo); err != nil { - t.Logf("Put error: %s (expected)", err) - n = i - break - } - } - closeWait.Wait() - - // During read. - h.openDB() - closing = false - for i := 0; i < n; i++ { - if !closing && i > n/2 { - t.Logf("close db during read, index=%d", i) - closeWait.Add(1) - go func() { - h.closeDB() - closeWait.Done() - }() - closing = true - } - if _, err := h.db.Get([]byte(fmt.Sprintf("%09d", i)), h.ro); err != nil { - t.Logf("Get error: %s (expected)", err) - break - } - } - closeWait.Wait() - - // During iterate. 
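TestDB_ReadOnly above expects ErrReadOnly once the database is switched over, while reads keep succeeding; a small sketch of that behaviour via the public API (the in-memory storage and the sample keys are assumptions of the example, not of the test):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("foo"), []byte("v1"), nil); err != nil {
		log.Fatal(err)
	}
	// Switch to read-only mode: later writes are rejected, reads still work.
	if err := db.SetReadOnly(); err != nil {
		log.Fatal(err)
	}
	if err := db.Put([]byte("foo"), []byte("vx"), nil); err != leveldb.ErrReadOnly {
		log.Fatalf("expected ErrReadOnly, got %v", err)
	}
	if v, err := db.Get([]byte("foo"), nil); err != nil || string(v) != "v1" {
		log.Fatalf("unexpected read result: %q, %v", v, err)
	}
}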
- h.openDB() - closing = false - iter := h.db.NewIterator(nil, h.ro) - for i := 0; iter.Next(); i++ { - if len(iter.Key()) == 0 || len(iter.Value()) == 0 { - t.Error("Key or value has zero length") - } - if !closing { - t.Logf("close db during iter, index=%d", i) - closeWait.Add(1) - go func() { - h.closeDB() - closeWait.Done() - }() - closing = true - } - time.Sleep(time.Millisecond) - } - if err := iter.Error(); err != nil { - t.Logf("Iter error: %s (expected)", err) - } - iter.Release() - closeWait.Wait() -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go index b8f7e7d2..1a000018 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go @@ -69,6 +69,10 @@ func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) { // DB. And a nil Range.Limit is treated as a key after all keys in // the DB. // +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Key() methods), its content should not be modified +// unless noted otherwise. +// // The iterator must be released after use, by calling Release method. // // Also read Iterator documentation of the leveldb/iterator package. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go index 7ecd960d..3f065489 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go @@ -84,7 +84,7 @@ func (db *DB) checkAndCleanFiles() error { var mfds []storage.FileDesc for num, present := range tmap { if !present { - mfds = append(mfds, storage.FileDesc{storage.TypeTable, num}) + mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num}) db.logf("db@janitor table missing @%d", num) } } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go index 5b6cb487..db0c1bec 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -7,6 +7,7 @@ package leveldb import ( + "sync/atomic" "time" "github.com/syndtr/goleveldb/leveldb/memdb" @@ -88,7 +89,11 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { return false case tLen >= pauseTrigger: delayed = true + // Set the write paused flag explicitly. + atomic.StoreInt32(&db.inWritePaused, 1) err = db.compTriggerWait(db.tcompCmdC) + // Unset the write paused flag. + atomic.StoreInt32(&db.inWritePaused, 0) if err != nil { return false } @@ -117,6 +122,8 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { db.writeDelayN++ } else if db.writeDelayN > 0 { db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) + atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN)) + atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay)) db.writeDelay = 0 db.writeDelayN = 0 } @@ -143,7 +150,7 @@ func (db *DB) unlockWrite(overflow bool, merged int, err error) { } } -// ourBatch if defined should equal with batch. +// ourBatch is batch that we can modify. func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { // Try to flush memdb. This method would also trying to throttle writes // if it is too fast and compaction cannot catch-up. 
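The WARNING added to the transaction iterator docs above states that slices returned by Iterator.Key() and Iterator.Value() may be reused and must not be modified; a minimal sketch, assuming only the public iterator API, of copying them before they are retained:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	_ = db.Put([]byte("a"), []byte("1"), nil)
	_ = db.Put([]byte("b"), []byte("2"), nil)

	kept := make(map[string]string)
	iter := db.NewIterator(nil, nil)
	for iter.Next() {
		// Key() and Value() return slices whose contents may change on the
		// next seek, so copy them before retaining.
		k := append([]byte(nil), iter.Key()...)
		v := append([]byte(nil), iter.Value()...)
		kept[string(k)] = string(v)
	}
	// Next returning false can mean exhaustion or an error; check Error().
	if err := iter.Error(); err != nil {
		log.Fatal(err)
	}
	iter.Release()
	fmt.Println(kept)
}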
@@ -212,6 +219,11 @@ func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { } } + // Release ourBatch if any. + if ourBatch != nil { + defer db.batchPool.Put(ourBatch) + } + // Seq number. seq := db.seq + 1 diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/external_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/external_test.go deleted file mode 100644 index 669d77f3..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/external_test.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Leveldb external", func() { - o := &opt.Options{ - DisableBlockCache: true, - BlockRestartInterval: 5, - BlockSize: 80, - Compression: opt.NoCompression, - OpenFilesCacheCapacity: -1, - Strict: opt.StrictAll, - WriteBuffer: 1000, - CompactionTableSize: 2000, - } - - Describe("write test", func() { - It("should do write correctly", func(done Done) { - db := newTestingDB(o, nil, nil) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 500, 1, 1, 50, 5, 5).Clone(), - } - testutil.DoDBTesting(&t) - db.TestClose() - done <- true - }, 80.0) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := newTestingDB(o, nil, nil) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - err := db.TestPut(key, value) - Expect(err).NotTo(HaveOccurred()) - }) - - return db - }, func(db testutil.DB) { - db.(*testingDB).TestClose() - }) - }) - - Describe("transaction test", func() { - It("should do transaction correctly", func(done Done) { - db := newTestingDB(o, nil, nil) - - By("creating first transaction") - var err error - tr := &testingTransaction{} - tr.Transaction, err = db.OpenTransaction() - Expect(err).NotTo(HaveOccurred()) - t0 := &testutil.DBTesting{ - DB: tr, - Deleted: testutil.KeyValue_Generate(nil, 200, 1, 1, 50, 5, 5).Clone(), - } - testutil.DoDBTesting(t0) - testutil.TestGet(tr, t0.Present) - testutil.TestHas(tr, t0.Present) - - By("committing first transaction") - err = tr.Commit() - Expect(err).NotTo(HaveOccurred()) - testutil.TestIter(db, nil, t0.Present) - testutil.TestGet(db, t0.Present) - testutil.TestHas(db, t0.Present) - - By("manipulating DB without transaction") - t0.DB = db - testutil.DoDBTesting(t0) - - By("creating second transaction") - tr.Transaction, err = db.OpenTransaction() - Expect(err).NotTo(HaveOccurred()) - t1 := &testutil.DBTesting{ - DB: tr, - Deleted: t0.Deleted.Clone(), - Present: t0.Present.Clone(), - } - testutil.DoDBTesting(t1) - testutil.TestIter(db, nil, t0.Present) - - By("discarding second transaction") - tr.Discard() - testutil.TestIter(db, nil, t0.Present) - - By("creating third transaction") - tr.Transaction, err = db.OpenTransaction() - Expect(err).NotTo(HaveOccurred()) - t0.DB = tr - testutil.DoDBTesting(t0) - - By("committing third transaction") - err = tr.Commit() - Expect(err).NotTo(HaveOccurred()) - testutil.TestIter(db, nil, t0.Present) - - db.TestClose() - done <- true - }, 240.0) - }) - }) -}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go 
b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go deleted file mode 100644 index 1fb56f07..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "encoding/binary" - "github.com/syndtr/goleveldb/leveldb/util" - "testing" -) - -type harness struct { - t *testing.T - - bloom Filter - generator FilterGenerator - filter []byte -} - -func newHarness(t *testing.T) *harness { - bloom := NewBloomFilter(10) - return &harness{ - t: t, - bloom: bloom, - generator: bloom.NewGenerator(), - } -} - -func (h *harness) add(key []byte) { - h.generator.Add(key) -} - -func (h *harness) addNum(key uint32) { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - h.add(b[:]) -} - -func (h *harness) build() { - b := &util.Buffer{} - h.generator.Generate(b) - h.filter = b.Bytes() -} - -func (h *harness) reset() { - h.filter = nil -} - -func (h *harness) filterLen() int { - return len(h.filter) -} - -func (h *harness) assert(key []byte, want, silent bool) bool { - got := h.bloom.Contains(h.filter, key) - if !silent && got != want { - h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) - } - return got -} - -func (h *harness) assertNum(key uint32, want, silent bool) bool { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - return h.assert(b[:], want, silent) -} - -func TestBloomFilter_Empty(t *testing.T) { - h := newHarness(t) - h.build() - h.assert([]byte("hello"), false, false) - h.assert([]byte("world"), false, false) -} - -func TestBloomFilter_Small(t *testing.T) { - h := newHarness(t) - h.add([]byte("hello")) - h.add([]byte("world")) - h.build() - h.assert([]byte("hello"), true, false) - h.assert([]byte("world"), true, false) - h.assert([]byte("x"), false, false) - h.assert([]byte("foo"), false, false) -} - -func nextN(n int) int { - switch { - case n < 10: - n += 1 - case n < 100: - n += 10 - case n < 1000: - n += 100 - default: - n += 1000 - } - return n -} - -func TestBloomFilter_VaryingLengths(t *testing.T) { - h := newHarness(t) - var mediocre, good int - for n := 1; n < 10000; n = nextN(n) { - h.reset() - for i := 0; i < n; i++ { - h.addNum(uint32(i)) - } - h.build() - - got := h.filterLen() - want := (n * 10 / 8) + 40 - if got > want { - t.Errorf("filter len test failed, '%d' > '%d'", got, want) - } - - for i := 0; i < n; i++ { - h.assertNum(uint32(i), true, false) - } - - var rate float32 - for i := 0; i < 10000; i++ { - if h.assertNum(uint32(i+1000000000), true, true) { - rate++ - } - } - rate /= 10000 - if rate > 0.02 { - t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) - } - if rate > 0.0125 { - mediocre++ - } else { - good++ - } - } - t.Logf("false positive rate: %d good, %d mediocre", good, mediocre) - if mediocre > good/5 { - t.Error("mediocre false positive rate is more than expected") - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go deleted file mode 100644 index f16d0144..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Array iterator", func() { - It("Should iterates and seeks correctly", func() { - // Build key/value. - kv := testutil.KeyValue_Generate(nil, 70, 1, 1, 5, 3, 3) - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewArrayIterator(kv), - } - testutil.DoIteratorTesting(&t) - }) - }) -}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go deleted file mode 100644 index fde80167..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - "sort" - - . "github.com/onsi/ginkgo" - - "github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -type keyValue struct { - key []byte - testutil.KeyValue -} - -type keyValueIndex []keyValue - -func (x keyValueIndex) Search(key []byte) int { - return sort.Search(x.Len(), func(i int) bool { - return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 - }) -} - -func (x keyValueIndex) Len() int { return len(x) } -func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } -func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } - -var _ = testutil.Defer(func() { - Describe("Indexed iterator", func() { - Test := func(n ...int) func() { - if len(n) == 0 { - rnd := testutil.NewRand() - n = make([]int, rnd.Intn(17)+3) - for i := range n { - n[i] = rnd.Intn(19) + 1 - } - } - - return func() { - It("Should iterates and seeks correctly", func(done Done) { - // Build key/value. - index := make(keyValueIndex, len(n)) - sum := 0 - for _, x := range n { - sum += x - } - kv := testutil.KeyValue_Generate(nil, sum, 1, 1, 10, 4, 4) - for i, j := 0, 0; i < len(n); i++ { - for x := n[i]; x > 0; x-- { - key, value := kv.Index(j) - index[i].key = key - index[i].Put(key, value) - j++ - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewIndexedIterator(NewArrayIndexer(index), true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 15.0) - } - } - - Describe("with 100 keys", Test(100)) - Describe("with 50-50 keys", Test(50, 50)) - Describe("with 50-1 keys", Test(50, 1)) - Describe("with 50-1-50 keys", Test(50, 1, 50)) - Describe("with 1-50 keys", Test(1, 50)) - Describe("with random N-keys", Test()) - }) -}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go index 3b555327..96fb0f68 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go @@ -40,11 +40,11 @@ type IteratorSeeker interface { Seek(key []byte) bool // Next moves the iterator to the next key/value pair. - // It returns whether the iterator is exhausted. + // It returns false if the iterator is exhausted. 
Next() bool // Prev moves the iterator to the previous key/value pair. - // It returns whether the iterator is exhausted. + // It returns false if the iterator is exhausted. Prev() bool } @@ -88,7 +88,7 @@ type Iterator interface { // its contents may change on the next call to any 'seeks method'. Key() []byte - // Value returns the key of the current key/value pair, or nil if done. + // Value returns the value of the current key/value pair, or nil if done. // The caller should not modify the contents of the returned slice, and // its contents may change on the next call to any 'seeks method'. Value() []byte diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go deleted file mode 100644 index 5ef8d5ba..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package iterator_test - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestIterator(t *testing.T) { - testutil.RunSuite(t, "Iterator Suite") -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go deleted file mode 100644 index ee40881e..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Merged iterator", func() { - Test := func(filled int, empty int) func() { - return func() { - It("Should iterates and seeks correctly", func(done Done) { - rnd := testutil.NewRand() - - // Build key/value. - filledKV := make([]testutil.KeyValue, filled) - kv := testutil.KeyValue_Generate(nil, 100, 1, 1, 10, 4, 4) - kv.Iterate(func(i int, key, value []byte) { - filledKV[rnd.Intn(filled)].Put(key, value) - }) - - // Create itearators. - iters := make([]Iterator, filled+empty) - for i := range iters { - if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) { - filled-- - Expect(filledKV[filled].Len()).ShouldNot(BeZero()) - iters[i] = NewArrayIterator(filledKV[filled]) - } else { - empty-- - iters[i] = NewEmptyIterator(nil) - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewMergedIterator(iters, comparer.DefaultComparer, true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 15.0) - } - } - - Describe("with three, all filled iterators", Test(3, 0)) - Describe("with one filled, one empty iterators", Test(1, 1)) - Describe("with one filled, two empty iterators", Test(1, 2)) - }) -}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go deleted file mode 100644 index 0fcf2259..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -package journal - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math/rand" - "strings" - "testing" -) - -type dropper struct { - t *testing.T -} - -func (d dropper) Drop(err error) { - d.t.Log(err) -} - -func short(s string) string { - if len(s) < 64 { - return s - } - return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) -} - -// big returns a string of length n, composed of repetitions of partial. -func big(partial string, n int) string { - return strings.Repeat(partial, n/len(partial)+1)[:n] -} - -func TestEmpty(t *testing.T) { - buf := new(bytes.Buffer) - r := NewReader(buf, dropper{t}, true, true) - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { - buf := new(bytes.Buffer) - - reset() - w := NewWriter(buf) - for { - s, ok := gen() - if !ok { - break - } - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - reset() - r := NewReader(buf, dropper{t}, true, true) - for { - s, ok := gen() - if !ok { - break - } - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - x, err := ioutil.ReadAll(rr) - if err != nil { - t.Fatal(err) - } - if string(x) != s { - t.Fatalf("got %q, want %q", short(string(x)), short(s)) - } - } - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testLiterals(t *testing.T, s []string) { - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == len(s) { - return "", false - } - i++ - return s[i-1], true - } - testGenerator(t, reset, gen) -} - -func TestMany(t *testing.T) { - const n = 1e5 - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return fmt.Sprintf("%d.", i-1), true - } - testGenerator(t, reset, gen) -} - -func TestRandom(t *testing.T) { - const n = 1e2 - var ( - i int - r *rand.Rand - ) - reset := func() { - i, r = 0, rand.New(rand.NewSource(0)) - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true - } - testGenerator(t, reset, gen) -} - -func TestBasic(t *testing.T) { - testLiterals(t, []string{ - strings.Repeat("a", 1000), - strings.Repeat("b", 97270), - strings.Repeat("c", 8000), - }) -} - -func TestBoundary(t *testing.T) { - for i := blockSize - 16; i < blockSize+16; i++ { - s0 := big("abcd", i) - for j := blockSize - 16; j < blockSize+16; j++ { - s1 := big("ABCDE", j) - testLiterals(t, []string{s0, s1}) - testLiterals(t, []string{s0, "", s1}) - testLiterals(t, []string{s0, "x", s1}) - } - } -} - -func TestFlush(t *testing.T) { - buf := new(bytes.Buffer) - w := NewWriter(buf) - // Write a couple of records. Everything should still be held - // in the record.Writer buffer, so that buf.Len should be 0. 
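The flush accounting in TestFlush relies on every journal chunk carrying a 7-byte header (4-byte checksum, 2-byte length, 1-byte chunk type), so two records of 1 and 2 payload bytes flush as 2*7 + 1 + 2 = 17 bytes; a short sketch of the same arithmetic with the exported journal package:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

func main() {
	buf := new(bytes.Buffer)
	w := journal.NewWriter(buf)

	w0, _ := w.Next()
	w0.Write([]byte("0")) // 1 payload byte
	w1, _ := w.Next()
	w1.Write([]byte("11")) // 2 payload bytes

	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
	// Two chunk headers (7 bytes each) plus 1+2 payload bytes: 17 in total.
	fmt.Println(buf.Len())
}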
- w0, _ := w.Next() - w0.Write([]byte("0")) - w1, _ := w.Next() - w1.Write([]byte("11")) - if got, want := buf.Len(), 0; got != want { - t.Fatalf("buffer length #0: got %d want %d", got, want) - } - // Flush the record.Writer buffer, which should yield 17 bytes. - // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #1: got %d want %d", got, want) - } - // Do another write, one that isn't large enough to complete the block. - // The write should not have flowed through to buf. - w2, _ := w.Next() - w2.Write(bytes.Repeat([]byte("2"), 10000)) - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #2: got %d want %d", got, want) - } - // Flushing should get us up to 10024 bytes written. - // 10024 = 17 + 7 + 10000. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 10024; got != want { - t.Fatalf("buffer length #3: got %d want %d", got, want) - } - // Do a bigger write, one that completes the current block. - // We should now have 32768 bytes (a complete block), without - // an explicit flush. - w3, _ := w.Next() - w3.Write(bytes.Repeat([]byte("3"), 40000)) - if got, want := buf.Len(), 32768; got != want { - t.Fatalf("buffer length #4: got %d want %d", got, want) - } - // Flushing should get us up to 50038 bytes written. - // 50038 = 10024 + 2*7 + 40000. There are two headers because - // the one record was split into two chunks. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 50038; got != want { - t.Fatalf("buffer length #5: got %d want %d", got, want) - } - // Check that reading those records give the right lengths. - r := NewReader(buf, dropper{t}, true, true) - wants := []int64{1, 2, 10000, 40000} - for i, want := range wants { - rr, _ := r.Next() - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #%d: %v", i, err) - } - if n != want { - t.Fatalf("read #%d: got %d bytes want %d", i, n, want) - } - } -} - -func TestNonExhaustiveRead(t *testing.T) { - const n = 100 - buf := new(bytes.Buffer) - p := make([]byte, 10) - rnd := rand.New(rand.NewSource(1)) - - w := NewWriter(buf) - for i := 0; i < n; i++ { - length := len(p) + rnd.Intn(3*blockSize) - s := string(uint8(i)) + "123456789abcdefgh" - ww, _ := w.Next() - ww.Write([]byte(big(s, length))) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - for i := 0; i < n; i++ { - rr, _ := r.Next() - _, err := io.ReadFull(rr, p) - if err != nil { - t.Fatal(err) - } - want := string(uint8(i)) + "123456789" - if got := string(p); got != want { - t.Fatalf("read #%d: got %q want %q", i, got, want) - } - } -} - -func TestStaleReader(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w0.Write([]byte("0")) - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1.Write([]byte("11")) - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - r0, err := r.Next() - if err != nil { - t.Fatal(err) - } - r1, err := r.Next() - if err != nil { - t.Fatal(err) - } - p := make([]byte, 1) - if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale read #0: unexpected error: %v", err) - } - if _, err := r1.Read(p); err != nil { - t.Fatalf("fresh read #1: got %v want nil error", err) - } - if p[0] != 
'1' { - t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) - } -} - -func TestStaleWriter(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #0: unexpected error: %v", err) - } - if _, err := w1.Write([]byte("11")); err != nil { - t.Fatalf("fresh write #1: got %v want nil error", err) - } - if err := w.Flush(); err != nil { - t.Fatalf("flush: %v", err) - } - if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #1: unexpected error: %v", err) - } -} - -func TestCorrupt_MissingLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Cut the last block. - b := buf.Bytes()[:blockSize] - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read. - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if n != blockSize-1024 { - t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) - } - - // Second read. - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedFirstBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #0. - for i := 0; i < 1024; i++ { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (third record). 
- rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #1. - for i := 0; i < 1024; i++ { - b[blockSize+i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - // Third read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. 
- ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #3. - for i := len(b) - 1; i > len(b)-1024; i-- { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize - headerSize); n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - // Third read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - // Fourth read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #3: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize/2 + headerSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/key_test.go deleted file mode 100644 index 2f33ccb8..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/key_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var defaultIComparer = &iComparer{comparer.DefaultComparer} - -func ikey(key string, seq uint64, kt keyType) internalKey { - return makeInternalKey(nil, []byte(key), uint64(seq), kt) -} - -func shortSep(a, b []byte) []byte { - dst := make([]byte, len(a)) - dst = defaultIComparer.Separator(dst[:0], a, b) - if dst == nil { - return a - } - return dst -} - -func shortSuccessor(b []byte) []byte { - dst := make([]byte, len(b)) - dst = defaultIComparer.Successor(dst[:0], b) - if dst == nil { - return b - } - return dst -} - -func testSingleKey(t *testing.T, key string, seq uint64, kt keyType) { - ik := ikey(key, seq, kt) - - if !bytes.Equal(ik.ukey(), []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - - rseq, rt := ik.parseNum() - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - - if rukey, rseq, rt, kerr := parseInternalKey(ik); kerr == nil { - if !bytes.Equal(rukey, []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - } else { - t.Errorf("key error: %v", kerr) - } -} - -func TestInternalKey_EncodeDecode(t *testing.T) { - keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} - seqs := []uint64{ - 1, 2, 3, - (1 << 8) - 1, 1 << 8, (1 << 8) + 1, - (1 << 16) - 1, 1 << 16, (1 << 16) + 1, - (1 << 32) - 1, 1 << 32, (1 << 32) + 1, - } - for _, key := range keys { - for _, seq := range seqs { - testSingleKey(t, key, seq, keyTypeVal) - testSingleKey(t, "hello", 1, keyTypeDel) - } - } -} - -func assertBytes(t *testing.T, want, got []byte) { - if !bytes.Equal(got, want) { - t.Errorf("assert failed, got %v, want %v", got, want) - } -} - -func TestInternalKeyShortSeparator(t *testing.T) { - // When user keys are same - assertBytes(t, ikey("foo", 100, keyTypeVal), - shortSep(ikey("foo", 100, keyTypeVal), - ikey("foo", 99, keyTypeVal))) - assertBytes(t, ikey("foo", 100, keyTypeVal), - shortSep(ikey("foo", 100, keyTypeVal), - ikey("foo", 101, keyTypeVal))) - assertBytes(t, ikey("foo", 100, keyTypeVal), - shortSep(ikey("foo", 100, keyTypeVal), - ikey("foo", 100, keyTypeVal))) - assertBytes(t, ikey("foo", 100, keyTypeVal), - shortSep(ikey("foo", 100, keyTypeVal), - ikey("foo", 100, keyTypeDel))) - - // When user keys are misordered - assertBytes(t, ikey("foo", 100, keyTypeVal), - shortSep(ikey("foo", 100, keyTypeVal), - ikey("bar", 99, keyTypeVal))) - - // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek), - shortSep(ikey("foo", 100, keyTypeVal), - ikey("hello", 200, keyTypeVal))) - - // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, keyTypeVal), - shortSep(ikey("foo", 100, keyTypeVal), - ikey("foobar", 200, keyTypeVal))) - - // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, keyTypeVal), - shortSep(ikey("foobar", 100, keyTypeVal), - ikey("foo", 200, keyTypeVal))) -} - -func TestInternalKeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek), - shortSuccessor(ikey("foo", 100, keyTypeVal))) - assertBytes(t, ikey("\xff\xff", 100, keyTypeVal), - 
shortSuccessor(ikey("\xff\xff", 100, keyTypeVal))) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go deleted file mode 100644 index fefa007a..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package leveldb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestLevelDB(t *testing.T) { - testutil.RunSuite(t, "LevelDB Suite") -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go deleted file mode 100644 index b05084ca..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - "encoding/binary" - "math/rand" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -func BenchmarkPut(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkPutRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkGet(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := range buf { - p.Get(buf[i][:]) - } -} - -func BenchmarkGetRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p.Get(buf[rand.Int()%b.N][:]) - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go index 18a19ed4..824e47f5 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go @@ -329,7 +329,7 @@ func (p *DB) Delete(key []byte) error { h := p.nodeData[node+nHeight] for i, n := range p.prevNode[:h] { - m := n + 4 + i + m := n + nNext + i p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] } @@ -397,6 +397,10 @@ func (p *DB) Find(key []byte) (rkey, value []byte, err error) { // DB. And a nil Range.Limit is treated as a key after all keys in // the DB. // +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Key() methods), its content should not be modified +// unless noted otherwise. +// // The iterator must be released after use, by calling Release method. // // Also read Iterator documentation of the leveldb/iterator package. 
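The memdb hunk above documents the iterator contract: slices returned by Key and Value must not be modified and may change on the next seek, and the iterator must be released after use. A minimal usage sketch of that contract — assuming only the memdb, comparer, and iterator APIs that appear elsewhere in this patch, and not part of the vendored sources themselves:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	// In-memory key/value DB with the default comparer and no preallocated capacity,
	// as used by the vendored memdb tests.
	db := memdb.New(comparer.DefaultComparer, 0)
	db.Put([]byte("k1"), []byte("v1"))
	db.Put([]byte("k2"), []byte("v2"))

	it := db.NewIterator(nil) // a nil range iterates over all keys
	defer it.Release()        // release after use, as documented in the hunk above
	for it.Next() {
		// Copy before retaining: the returned slices may change on the next seek.
		k := append([]byte(nil), it.Key()...)
		v := append([]byte(nil), it.Value()...)
		fmt.Printf("%s=%s\n", k, v)
	}
}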
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go deleted file mode 100644 index 18c304b7..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package memdb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestMemDB(t *testing.T) { - testutil.RunSuite(t, "MemDB Suite") -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go deleted file mode 100644 index 3f0a31e4..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLT(key); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestFindLast() (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLast(); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestPut(key []byte, value []byte) error { - p.Put(key, value) - return nil -} - -func (p *DB) TestDelete(key []byte) error { - p.Delete(key) - return nil -} - -func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return p.Find(key) -} - -func (p *DB) TestGet(key []byte) (value []byte, err error) { - return p.Get(key) -} - -func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { - return p.NewIterator(slice) -} - -var _ = testutil.Defer(func() { - Describe("Memdb", func() { - Describe("write test", func() { - It("should do write correctly", func() { - db := New(comparer.DefaultComparer, 0) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 1, 30, 5, 5).Clone(), - PostFn: func(t *testutil.DBTesting) { - Expect(db.Len()).Should(Equal(t.Present.Len())) - Expect(db.Size()).Should(Equal(t.Present.Size())) - switch t.Act { - case testutil.DBPut, testutil.DBOverwrite: - Expect(db.Contains(t.ActKey)).Should(BeTrue()) - default: - Expect(db.Contains(t.ActKey)).Should(BeFalse()) - } - }, - } - testutil.DoDBTesting(&t) - }) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := New(comparer.DefaultComparer, 0) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - db.Put(key, value) - }) - - if kv.Len() > 1 { - It("Should find correct keys with findLT", func() { - testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { - key_, key, _ := kv.IndexInexact(i + 1) - expectedKey, expectedValue := kv.Index(i) - - // Using key that exist. 
- rkey, rvalue, err := db.TestFindLT(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) - Expect(rkey).Should(Equal(expectedKey), "Key") - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) - - // Using key that doesn't exist. - rkey, rvalue, err = db.TestFindLT(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) - Expect(rkey).Should(Equal(expectedKey)) - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) - }) - }) - } - - if kv.Len() > 0 { - It("Should find last key with findLast", func() { - key, value := kv.Index(kv.Len() - 1) - rkey, rvalue, err := db.TestFindLast() - Expect(err).ShouldNot(HaveOccurred()) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value)) - }) - } - - return db - }, nil, nil) - }) - }) -}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go index 44e7d9ad..528b1642 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -158,6 +158,12 @@ type Options struct { // The default value is 8MiB. BlockCacheCapacity int + // BlockCacheEvictRemoved allows enable forced-eviction on cached block belonging + // to removed 'sorted table'. + // + // The default if false. + BlockCacheEvictRemoved bool + // BlockRestartInterval is the number of keys between restart points for // delta encoding of keys. // @@ -384,6 +390,13 @@ func (o *Options) GetBlockCacheCapacity() int { return o.BlockCacheCapacity } +func (o *Options) GetBlockCacheEvictRemoved() bool { + if o == nil { + return false + } + return o.BlockCacheEvictRemoved +} + func (o *Options) GetBlockRestartInterval() int { if o == nil || o.BlockRestartInterval <= 0 { return DefaultBlockRestartInterval diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go index ad68a870..3f391f93 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go @@ -42,7 +42,7 @@ type session struct { stTempFileNum int64 stSeqNum uint64 // last mem compacted seq; need external synchronization - stor storage.Storage + stor *iStorage storLock storage.Locker o *cachedOptions icmp *iComparer @@ -68,7 +68,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { return } s = &session{ - stor: stor, + stor: newIStorage(stor), storLock: storLock, fileRef: make(map[int64]int), } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go deleted file mode 100644 index 5af399f0..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "testing" -) - -func decodeEncode(v *sessionRecord) (res bool, err error) { - b := new(bytes.Buffer) - err = v.encode(b) - if err != nil { - return - } - v2 := &sessionRecord{} - err = v.decode(b) - if err != nil { - return - } - b2 := new(bytes.Buffer) - err = v2.encode(b2) - if err != nil { - return - } - return bytes.Equal(b.Bytes(), b2.Bytes()), nil -} - -func TestSessionRecord_EncodeDecode(t *testing.T) { - big := int64(1) << 50 - v := &sessionRecord{} - i := int64(0) - test := func() { - res, err := decodeEncode(v) - if err != nil { - t.Fatalf("error when testing encode/decode sessionRecord: %v", err) - } - if !res { - t.Error("encode/decode test failed at iteration:", i) - } - } - - for ; i < 4; i++ { - test() - v.addTable(3, big+300+i, big+400+i, - makeInternalKey(nil, []byte("foo"), uint64(big+500+1), keyTypeVal), - makeInternalKey(nil, []byte("zoo"), uint64(big+600+1), keyTypeDel)) - v.delTable(4, big+700+i) - v.addCompPtr(int(i), makeInternalKey(nil, []byte("x"), uint64(big+900+1), keyTypeVal)) - } - - v.setComparer("foo") - v.setJournalNum(big + 100) - v.setPrevJournalNum(big + 99) - v.setNextFileNum(big + 200) - v.setSeqNum(uint64(big + 1000)) - test() -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go index 92328933..40cb2cf9 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go @@ -36,7 +36,7 @@ func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf func (s *session) newTemp() storage.FileDesc { num := atomic.AddInt64(&s.stTempFileNum, 1) - 1 - return storage.FileDesc{storage.TypeTemp, num} + return storage.FileDesc{Type: storage.TypeTemp, Num: num} } func (s *session) addFileRef(fd storage.FileDesc, ref int) int { @@ -190,7 +190,7 @@ func (s *session) recordCommited(rec *sessionRecord) { // Create a new manifest file; need external synchronization. func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()} + fd := storage.FileDesc{Type: storage.TypeManifest, Num: s.allocFileNum()} writer, err := s.stor.Create(fd) if err != nil { return diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go new file mode 100644 index 00000000..d45fb5df --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go @@ -0,0 +1,63 @@ +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/storage" + "sync/atomic" +) + +type iStorage struct { + storage.Storage + read uint64 + write uint64 +} + +func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) { + r, err := c.Storage.Open(fd) + return &iStorageReader{r, c}, err +} + +func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) { + w, err := c.Storage.Create(fd) + return &iStorageWriter{w, c}, err +} + +func (c *iStorage) reads() uint64 { + return atomic.LoadUint64(&c.read) +} + +func (c *iStorage) writes() uint64 { + return atomic.LoadUint64(&c.write) +} + +// newIStorage returns the given storage wrapped by iStorage. 
+func newIStorage(s storage.Storage) *iStorage { + return &iStorage{s, 0, 0} +} + +type iStorageReader struct { + storage.Reader + c *iStorage +} + +func (r *iStorageReader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) { + n, err = r.Reader.ReadAt(p, off) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +type iStorageWriter struct { + storage.Writer + c *iStorage +} + +func (w *iStorageWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + atomic.AddUint64(&w.c.write, uint64(n)) + return n, err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go index 1189deca..9ba71fd6 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -9,10 +9,12 @@ package storage import ( "errors" "fmt" + "io" "io/ioutil" "os" "path/filepath" "runtime" + "sort" "strconv" "strings" "sync" @@ -42,6 +44,30 @@ func (lock *fileStorageLock) Unlock() { } } +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func writeFileSynced(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Sync(); err == nil { + err = err1 + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + const logSizeThreshold = 1024 * 1024 // 1 MiB // fileStorage is a file-system backed storage. @@ -60,7 +86,7 @@ type fileStorage struct { day int } -// OpenFile returns a new filesytem-backed storage implementation with the given +// OpenFile returns a new filesystem-backed storage implementation with the given // path. This also acquire a file lock, so any subsequent attempt to open the // same path will fail. // @@ -189,7 +215,8 @@ func (fs *fileStorage) doLog(t time.Time, str string) { // write fs.buf = append(fs.buf, []byte(str)...) fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) + n, _ := fs.logw.Write(fs.buf) + fs.logSize += int64(n) } func (fs *fileStorage) Log(str string) { @@ -210,7 +237,46 @@ func (fs *fileStorage) log(str string) { } } -func (fs *fileStorage) SetMeta(fd FileDesc) (err error) { +func (fs *fileStorage) setMeta(fd FileDesc) error { + content := fsGenName(fd) + "\n" + // Check and backup old CURRENT file. + currentPath := filepath.Join(fs.path, "CURRENT") + if _, err := os.Stat(currentPath); err == nil { + b, err := ioutil.ReadFile(currentPath) + if err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + if string(b) == content { + // Content not changed, do nothing. + return nil + } + if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + } else if !os.IsNotExist(err) { + return err + } + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + if err := writeFileSynced(path, []byte(content), 0644); err != nil { + fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err)) + return err + } + // Replace CURRENT file. 
+ if err := rename(path, currentPath); err != nil { + fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) + return err + } + // Sync root directory. + if err := syncDir(fs.path); err != nil { + fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + return nil +} + +func (fs *fileStorage) SetMeta(fd FileDesc) error { if !FileDescOk(fd) { return ErrInvalidFile } @@ -223,44 +289,10 @@ func (fs *fileStorage) SetMeta(fd FileDesc) (err error) { if fs.open < 0 { return ErrClosed } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return - } - _, err = fmt.Fprintln(w, fsGenName(fd)) - if err != nil { - fs.log(fmt.Sprintf("write CURRENT.%d: %v", fd.Num, err)) - return - } - if err = w.Sync(); err != nil { - fs.log(fmt.Sprintf("flush CURRENT.%d: %v", fd.Num, err)) - return - } - if err = w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, err)) - return - } - if err != nil { - return - } - if err = rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) - return - } - // Sync root directory. - if err = syncDir(fs.path); err != nil { - fs.log(fmt.Sprintf("syncDir: %v", err)) - } - return + return fs.setMeta(fd) } -func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { +func (fs *fileStorage) GetMeta() (FileDesc, error) { fs.mu.Lock() defer fs.mu.Unlock() if fs.open < 0 { @@ -268,7 +300,7 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { } dir, err := os.Open(fs.path) if err != nil { - return + return FileDesc{}, err } names, err := dir.Readdirnames(0) // Close the dir first before checking for Readdirnames error. @@ -276,94 +308,134 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { fs.log(fmt.Sprintf("close dir: %v", ce)) } if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, name := range names { - if strings.HasPrefix(name, "CURRENT") { - pend1 := len(name) > 7 - var pendNum int64 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if name[7] != '.' || len(name) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", name)) - continue - } - var e1 error - if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1)) - continue - } + return FileDesc{}, err + } + // Try this in order: + // - CURRENT.[0-9]+ ('pending rename' file, descending order) + // - CURRENT + // - CURRENT.bak + // + // Skip corrupted file or file that point to a missing target file. 
+ type currentFile struct { + name string + fd FileDesc + } + tryCurrent := func(name string) (*currentFile, error) { + b, err := ioutil.ReadFile(filepath.Join(fs.path, name)) + if err != nil { + if os.IsNotExist(err) { + err = os.ErrNotExist } - path := filepath.Join(fs.path, name) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return FileDesc{}, e1 + return nil, err + } + var fd FileDesc + if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) { + fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b)) + err := &ErrCorrupted{ + Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"), } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return FileDesc{}, e1 + return nil, err + } + if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil { + if os.IsNotExist(err) { + fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd)) + err = os.ErrNotExist } - var fd1 FileDesc - if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name)) - if pend1 { - rem = append(rem, name) - } - if !pend1 || cerr == nil { - metaFd, _ := fsParseName(name) - cerr = &ErrCorrupted{ - Fd: metaFd, - Err: errors.New("leveldb/storage: corrupted or incomplete meta file"), - } - } - } else if pend1 && pendNum != fd1.Num { - fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num)) - rem = append(rem, name) - } else if fd1.Num < fd.Num { - fs.log(fmt.Sprintf("skipping %s: obsolete", name)) - if pend1 { - rem = append(rem, name) - } + return nil, err + } + return ¤tFile{name: name, fd: fd}, nil + } + tryCurrents := func(names []string) (*currentFile, error) { + var ( + cur *currentFile + // Last corruption error. + lastCerr error + ) + for _, name := range names { + var err error + cur, err = tryCurrent(name) + if err == nil { + break + } else if err == os.ErrNotExist { + // Fallback to the next file. + } else if isCorrupted(err) { + lastCerr = err + // Fallback to the next file. } else { - fd = fd1 - pend = pend1 + // In case the error is due to permission, etc. + return nil, err } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", name, err)) + } + if cur == nil { + err := os.ErrNotExist + if lastCerr != nil { + err = lastCerr } + return nil, err } + return cur, nil } - // Don't remove any files if there is no valid CURRENT file. - if fd.Zero() { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist + + // Try 'pending rename' files. + var nums []int64 + for _, name := range names { + if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" { + i, err := strconv.ParseInt(name[8:], 10, 64) + if err == nil { + nums = append(nums, i) + } } - return } - if !fs.readOnly { - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err)) - } + var ( + pendCur *currentFile + pendErr = os.ErrNotExist + pendNames []string + ) + if len(nums) > 0 { + sort.Sort(sort.Reverse(int64Slice(nums))) + pendNames = make([]string, len(nums)) + for i, num := range nums { + pendNames[i] = fmt.Sprintf("CURRENT.%d", num) } - // Remove obsolete or incomplete pending CURRENT files. 
- for _, name := range rem { - path := filepath.Join(fs.path, name) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", name, err)) + pendCur, pendErr = tryCurrents(pendNames) + if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + } + + // Try CURRENT and CURRENT.bak. + curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"}) + if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) { + return FileDesc{}, curErr + } + + // pendCur takes precedence, but guards against obsolete pendCur. + if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) { + curCur = pendCur + } + + if curCur != nil { + // Restore CURRENT file to proper state. + if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) { + // Ignore setMeta errors, however don't delete obsolete files if we + // catch error. + if err := fs.setMeta(curCur.fd); err == nil { + // Remove 'pending rename' files. + for _, name := range pendNames { + if err := os.Remove(filepath.Join(fs.path, name)); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", name, err)) + } + } } } + return curCur.fd, nil } - return + + // Nothing found. + if isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + return FileDesc{}, curErr } func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go index bab62bfc..b8297980 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go @@ -8,7 +8,6 @@ package storage import ( "os" - "path/filepath" ) type plan9FileLock struct { @@ -48,8 +47,7 @@ func rename(oldpath, newpath string) error { } } - _, fname := filepath.Split(newpath) - return os.Rename(oldpath, fname) + return os.Rename(oldpath, newpath) } func syncDir(name string) error { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go deleted file mode 100644 index 28a59ecc..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "fmt" - "os" - "path/filepath" - "testing" -) - -var cases = []struct { - oldName []string - name string - ftype FileType - num int64 -}{ - {nil, "000100.log", TypeJournal, 100}, - {nil, "000000.log", TypeJournal, 0}, - {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, - {nil, "MANIFEST-000002", TypeManifest, 2}, - {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "9223372036854775807.log", TypeJournal, 9223372036854775807}, - {nil, "000100.tmp", TypeTemp, 100}, -} - -var invalidCases = []string{ - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop", -} - -func TestFileStorage_CreateFileName(t *testing.T) { - for _, c := range cases { - if name := fsGenName(FileDesc{c.ftype, c.num}); name != c.name { - t.Errorf("invalid filename got '%s', want '%s'", name, c.name) - } - } -} - -func TestFileStorage_ParseFileName(t *testing.T) { - for _, c := range cases { - for _, name := range append([]string{c.name}, c.oldName...) { - fd, ok := fsParseName(name) - if !ok { - t.Errorf("cannot parse filename '%s'", name) - continue - } - if fd.Type != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, fd.Type, c.ftype) - } - if fd.Num != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, fd.Num, c.num) - } - } - } -} - -func TestFileStorage_InvalidFileName(t *testing.T) { - for _, name := range invalidCases { - if fsParseNamePtr(name, nil) { - t.Errorf("filename '%s' should be invalid", name) - } - } -} - -func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrwlock-%d", os.Getuid())) - if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) { - t.Fatal("RemoveAll: got error: ", err) - } - defer os.RemoveAll(path) - - p1, err := OpenFile(path, false) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - p2, err := OpenFile(path, false) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - p2.Close() - p1.Close() - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path, false) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - defer p3.Close() - - l, err := p3.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = p3.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Unlock() - _, err = p3.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } -} - -func TestFileStorage_ReadOnlyLocking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrolock-%d", os.Getuid())) - if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) { - t.Fatal("RemoveAll: got error: ", err) - } - defer os.RemoveAll(path) - - p1, err := OpenFile(path, false) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - _, err = OpenFile(path, true) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path, true) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - - p4, err := OpenFile(path, true) - if err != nil { - t.Fatal("OpenFile(4): 
got error: ", err) - } - - _, err = OpenFile(path, false) - if err != nil { - t.Logf("OpenFile(5): got error: %s (expected)", err) - } else { - t.Fatal("OpenFile(2): expect error") - } - - p3.Close() - p4.Close() -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go index 7e299153..d75f66a9 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -67,13 +67,25 @@ func isErrInvalid(err error) bool { if err == os.ErrInvalid { return true } + // Go < 1.8 if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL { return true } + // Go >= 1.8 returns *os.PathError instead + if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL { + return true + } return false } func syncDir(name string) error { + // As per fsync manpage, Linux seems to expect fsync on directory, however + // some system don't support this, so we will ignore syscall.EINVAL. + // + // From fsync(2): + // Calling fsync() does not necessarily ensure that the entry in the + // directory containing the file has also reached disk. For that an + // explicit fsync() on a file descriptor for the directory is also needed. f, err := os.Open(name) if err != nil { return err diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go index 9b0421f0..838f1bee 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go @@ -12,7 +12,11 @@ import ( "sync" ) -const typeShift = 3 +const typeShift = 4 + +// Verify at compile-time that typeShift is large enough to cover all FileType +// values by confirming that 0 == 0. +var _ [0]struct{} = [TypeAll >> typeShift]struct{}{} type memStorageLock struct { ms *memStorage @@ -143,7 +147,7 @@ func (ms *memStorage) Remove(fd FileDesc) error { } func (ms *memStorage) Rename(oldfd, newfd FileDesc) error { - if FileDescOk(oldfd) || FileDescOk(newfd) { + if !FileDescOk(oldfd) || !FileDescOk(newfd) { return ErrInvalidFile } if oldfd == newfd { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go deleted file mode 100644 index fb03d301..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "bytes" - "testing" -) - -func TestMemStorage(t *testing.T) { - m := NewMemStorage() - - l, err := m.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = m.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Unlock() - _, err = m.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } - - w, err := m.Create(FileDesc{TypeTable, 1}) - if err != nil { - t.Fatal("Storage.Create: ", err) - } - w.Write([]byte("abc")) - w.Close() - if fds, _ := m.List(TypeAll); len(fds) != 1 { - t.Fatal("invalid GetFiles len") - } - buf := new(bytes.Buffer) - r, err := m.Open(FileDesc{TypeTable, 1}) - if err != nil { - t.Fatal("Open: got error: ", err) - } - buf.ReadFrom(r) - r.Close() - if got := buf.String(); got != "abc" { - t.Fatalf("Read: invalid value, want=abc got=%s", got) - } - if _, err := m.Open(FileDesc{TypeTable, 1}); err != nil { - t.Fatal("Open: got error: ", err) - } - if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil { - t.Fatal("expecting error") - } - m.Remove(FileDesc{TypeTable, 1}) - if fds, _ := m.List(TypeAll); len(fds) != 0 { - t.Fatal("invalid GetFiles len", len(fds)) - } - if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil { - t.Fatal("expecting error") - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go index c16bce6b..4e4a7242 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go @@ -55,6 +55,14 @@ type ErrCorrupted struct { Err error } +func isCorrupted(err error) bool { + switch err.(type) { + case *ErrCorrupted: + return true + } + return false +} + func (e *ErrCorrupted) Error() string { if !e.Fd.Zero() { return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go index 81d18a53..1fac60d0 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go @@ -78,7 +78,7 @@ func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFil } func tableFileFromRecord(r atRecord) *tFile { - return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax) + return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) } // tFiles hold multiple tFile. @@ -290,16 +290,17 @@ func (x *tFilesSortByNum) Less(i, j int) bool { // Table operations. type tOps struct { - s *session - noSync bool - cache *cache.Cache - bcache *cache.Cache - bpool *util.BufferPool + s *session + noSync bool + evictRemoved bool + cache *cache.Cache + bcache *cache.Cache + bpool *util.BufferPool } // Creates an empty table and returns table writer. 
func (t *tOps) create() (*tWriter, error) { - fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()} + fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} fw, err := t.s.stor.Create(fd) if err != nil { return nil, err @@ -422,7 +423,7 @@ func (t *tOps) remove(f *tFile) { } else { t.s.logf("table@remove removed @%d", f.fd.Num) } - if t.bcache != nil { + if t.evictRemoved && t.bcache != nil { t.bcache.EvictNS(uint64(f.fd.Num)) } }) @@ -451,7 +452,7 @@ func newTableOps(s *session) *tOps { if !s.o.GetDisableBlockCache() { var bcacher cache.Cacher if s.o.GetBlockCacheCapacity() > 0 { - bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity()) + bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) } bcache = cache.NewCache(bcacher) } @@ -459,11 +460,12 @@ func newTableOps(s *session) *tOps { bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) } return &tOps{ - s: s, - noSync: s.o.GetNoSync(), - cache: cache.NewCache(cacher), - bcache: bcache, - bpool: bpool, + s: s, + noSync: s.o.GetNoSync(), + evictRemoved: s.o.GetBlockCacheEvictRemoved(), + cache: cache.NewCache(cacher), + bcache: bcache, + bpool: bpool, } } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go deleted file mode 100644 index 00e6f9ee..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type blockTesting struct { - tr *Reader - b *block -} - -func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.tr.newBlockIter(t.b, nil, slice, false) -} - -var _ = testutil.Defer(func() { - Describe("Block", func() { - Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting { - // Building the block. - bw := &blockWriter{ - restartInterval: restartInterval, - scratch: make([]byte, 30), - } - kv.Iterate(func(i int, key, value []byte) { - bw.append(key, value) - }) - bw.finish() - - // Opening the block. - data := bw.buf.Bytes() - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - return &blockTesting{ - tr: &Reader{cmp: comparer.DefaultComparer}, - b: &block{ - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - }, - } - } - - Describe("read test", func() { - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - kv := &testutil.KeyValue{} - Text := func() string { - return fmt.Sprintf("and %d keys", kv.Len()) - } - - Test := func() { - // Make block. - br := Build(kv, restartInterval) - // Do testing. 
- testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) - } - - Describe(Text(), Test) - - kv.PutString("", "empty") - Describe(Text(), Test) - - kv.PutString("a1", "foo") - Describe(Text(), Test) - - kv.PutString("a2", "v") - Describe(Text(), Test) - - kv.PutString("a3qqwrkks", "hello") - Describe(Text(), Test) - - kv.PutString("a4", "bar") - Describe(Text(), Test) - - kv.PutString("a5111111", "v5") - kv.PutString("a6", "") - kv.PutString("a7", "v7") - kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") - kv.PutString("b", "v9") - kv.PutString("c9", "v9") - kv.PutString("c91", "v9") - kv.PutString("d0", "v9") - Describe(Text(), Test) - }) - } - }) - - Describe("out-of-bound slice test", func() { - kv := &testutil.KeyValue{} - kv.PutString("k1", "v1") - kv.PutString("k2", "v2") - kv.PutString("k3abcdefgg", "v3") - kv.PutString("k4", "v4") - kv.PutString("k5", "v5") - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - // Make block. - bt := Build(kv, restartInterval) - - Test := func(r *util.Range) func(done Done) { - return func(done Done) { - iter := bt.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: iter, - } - - testutil.DoIteratorTesting(&t) - iter.Release() - done <- true - } - } - - It("Should do iterations and seeks correctly #0", - Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) - - It("Should do iterations and seeks correctly #1", - Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) - }) - } - }) - }) -}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go index 16cfbaa0..496feb6f 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go @@ -787,6 +787,10 @@ func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChe // table. And a nil Range.Limit is treated as a key after all keys in // the table. // +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Key() methods), its content should not be modified +// unless noted otherwise. +// // The returned iterator is not safe for concurrent use and should be released // after use. // diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go deleted file mode 100644 index 6465da6e..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package table - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestTable(t *testing.T) { - testutil.RunSuite(t, "Table Suite") -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go deleted file mode 100644 index 232efcdf..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "bytes" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tableWrapper struct { - *Reader -} - -func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return t.Reader.Find(key, false, nil) -} - -func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { - return t.Reader.Get(key, nil) -} - -func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.Reader.NewIterator(slice, nil) -} - -var _ = testutil.Defer(func() { - Describe("Table", func() { - Describe("approximate offset test", func() { - var ( - buf = &bytes.Buffer{} - o = &opt.Options{ - BlockSize: 1024, - Compression: opt.NoCompression, - } - ) - - // Building the table. - tw := NewWriter(buf, o) - tw.Append([]byte("k01"), []byte("hello")) - tw.Append([]byte("k02"), []byte("hello2")) - tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) - tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) - tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) - tw.Append([]byte("k06"), []byte("hello3")) - tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) - err := tw.Close() - - It("Should be able to approximate offset of a key correctly", func() { - Expect(err).ShouldNot(HaveOccurred()) - - tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o) - Expect(err).ShouldNot(HaveOccurred()) - CheckOffset := func(key string, expect, threshold int) { - offset, err := tr.OffsetOf([]byte(key)) - Expect(err).ShouldNot(HaveOccurred()) - Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) - } - - CheckOffset("k0", 0, 0) - CheckOffset("k01a", 0, 0) - CheckOffset("k02", 0, 0) - CheckOffset("k03", 0, 0) - CheckOffset("k04", 10000, 1000) - CheckOffset("k04a", 210000, 1000) - CheckOffset("k05", 210000, 1000) - CheckOffset("k06", 510000, 1000) - CheckOffset("k07", 510000, 1000) - CheckOffset("xyz", 610000, 2000) - }) - }) - - Describe("read test", func() { - Build := func(kv testutil.KeyValue) testutil.DB { - o := &opt.Options{ - BlockSize: 512, - BlockRestartInterval: 3, - } - buf := &bytes.Buffer{} - - // Building the table. - tw := NewWriter(buf, o) - kv.Iterate(func(i int, key, value []byte) { - tw.Append(key, value) - }) - tw.Close() - - // Opening the table. 
- tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o) - return tableWrapper{tr} - } - Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { - return func() { - db := Build(*kv) - if body != nil { - body(db.(tableWrapper).Reader) - } - testutil.KeyValueTesting(nil, *kv, db, nil, nil) - } - } - - testutil.AllKeyValueTesting(nil, Build, nil, nil) - Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 1, 10, 512, 512), func(r *Reader) { - It("should have correct blocks number", func() { - indexBlock, err := r.readBlock(r.indexBH, true) - Expect(err).To(BeNil()) - Expect(indexBlock.restartsLen).Should(Equal(9)) - }) - })) - }) - }) -}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil/db.go deleted file mode 100644 index ec3f177a..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/db.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type DB interface{} - -type Put interface { - TestPut(key []byte, value []byte) error -} - -type Delete interface { - TestDelete(key []byte) error -} - -type Find interface { - TestFind(key []byte) (rkey, rvalue []byte, err error) -} - -type Get interface { - TestGet(key []byte) (value []byte, err error) -} - -type Has interface { - TestHas(key []byte) (ret bool, err error) -} - -type NewIterator interface { - TestNewIterator(slice *util.Range) iterator.Iterator -} - -type DBAct int - -func (a DBAct) String() string { - switch a { - case DBNone: - return "none" - case DBPut: - return "put" - case DBOverwrite: - return "overwrite" - case DBDelete: - return "delete" - case DBDeleteNA: - return "delete_na" - } - return "unknown" -} - -const ( - DBNone DBAct = iota - DBPut - DBOverwrite - DBDelete - DBDeleteNA -) - -type DBTesting struct { - Rand *rand.Rand - DB interface { - Get - Put - Delete - } - PostFn func(t *DBTesting) - Deleted, Present KeyValue - Act, LastAct DBAct - ActKey, LastActKey []byte -} - -func (t *DBTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *DBTesting) setAct(act DBAct, key []byte) { - t.LastAct, t.Act = t.Act, act - t.LastActKey, t.ActKey = t.ActKey, key -} - -func (t *DBTesting) text() string { - return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) -} - -func (t *DBTesting) Text() string { - return "DBTesting " + t.text() -} - -func (t *DBTesting) TestPresentKV(key, value []byte) { - rvalue, err := t.DB.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) - Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllPresent() { - t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestPresentKV(key, value) - }) -} - -func (t *DBTesting) TestDeletedKey(key []byte) { - _, err := t.DB.TestGet(key) - Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllDeleted() { - t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { - 
t.TestDeletedKey(key) - }) -} - -func (t *DBTesting) TestAll() { - dn := t.Deleted.Len() - pn := t.Present.Len() - ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { - if i >= dn { - key, value := t.Present.Index(i - dn) - t.TestPresentKV(key, value) - } else { - t.TestDeletedKey(t.Deleted.KeyAt(i)) - } - }) -} - -func (t *DBTesting) Put(key, value []byte) { - if new := t.Present.PutU(key, value); new { - t.setAct(DBPut, key) - } else { - t.setAct(DBOverwrite, key) - } - t.Deleted.Delete(key) - err := t.DB.TestPut(key, value) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestPresentKV(key, value) - t.post() -} - -func (t *DBTesting) PutRandom() bool { - if t.Deleted.Len() > 0 { - i := t.Rand.Intn(t.Deleted.Len()) - key, value := t.Deleted.Index(i) - t.Put(key, value) - return true - } - return false -} - -func (t *DBTesting) Delete(key []byte) { - if exist, value := t.Present.Delete(key); exist { - t.setAct(DBDelete, key) - t.Deleted.PutU(key, value) - } else { - t.setAct(DBDeleteNA, key) - } - err := t.DB.TestDelete(key) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestDeletedKey(key) - t.post() -} - -func (t *DBTesting) DeleteRandom() bool { - if t.Present.Len() > 0 { - i := t.Rand.Intn(t.Present.Len()) - t.Delete(t.Present.KeyAt(i)) - return true - } - return false -} - -func (t *DBTesting) RandomAct(round int) { - for i := 0; i < round; i++ { - if t.Rand.Int()%2 == 0 { - t.PutRandom() - } else { - t.DeleteRandom() - } - } -} - -func DoDBTesting(t *DBTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - - t.DeleteRandom() - t.PutRandom() - t.DeleteRandom() - t.DeleteRandom() - for i := t.Deleted.Len() / 2; i >= 0; i-- { - t.PutRandom() - } - t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) - - // Additional iterator testing - if db, ok := t.DB.(NewIterator); ok { - iter := db.TestNewIterator(nil) - Expect(iter.Error()).NotTo(HaveOccurred()) - - it := IteratorTesting{ - KeyValue: t.Present, - Iter: iter, - } - - DoIteratorTesting(&it) - iter.Release() - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go deleted file mode 100644 index 82f3d0e8..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go +++ /dev/null @@ -1,21 +0,0 @@ -package testutil - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func RunSuite(t GinkgoTestingT, name string) { - RunDefer() - - SynchronizedBeforeSuite(func() []byte { - RunDefer("setup") - return nil - }, func(data []byte) {}) - SynchronizedAfterSuite(func() { - RunDefer("teardown") - }, func() {}) - - RegisterFailHandler(Fail) - RunSpecs(t, name) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil/iter.go deleted file mode 100644 index df6d9db6..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/iter.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" -) - -type IterAct int - -func (a IterAct) String() string { - switch a { - case IterNone: - return "none" - case IterFirst: - return "first" - case IterLast: - return "last" - case IterPrev: - return "prev" - case IterNext: - return "next" - case IterSeek: - return "seek" - case IterSOI: - return "soi" - case IterEOI: - return "eoi" - } - return "unknown" -} - -const ( - IterNone IterAct = iota - IterFirst - IterLast - IterPrev - IterNext - IterSeek - IterSOI - IterEOI -) - -type IteratorTesting struct { - KeyValue - Iter iterator.Iterator - Rand *rand.Rand - PostFn func(t *IteratorTesting) - Pos int - Act, LastAct IterAct - - once bool -} - -func (t *IteratorTesting) init() { - if !t.once { - t.Pos = -1 - t.once = true - } -} - -func (t *IteratorTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *IteratorTesting) setAct(act IterAct) { - t.LastAct, t.Act = t.Act, act -} - -func (t *IteratorTesting) text() string { - return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) -} - -func (t *IteratorTesting) Text() string { - return "IteratorTesting is " + t.text() -} - -func (t *IteratorTesting) IsFirst() bool { - t.init() - return t.Len() > 0 && t.Pos == 0 -} - -func (t *IteratorTesting) IsLast() bool { - t.init() - return t.Len() > 0 && t.Pos == t.Len()-1 -} - -func (t *IteratorTesting) TestKV() { - t.init() - key, value := t.Index(t.Pos) - Expect(t.Iter.Key()).NotTo(BeNil()) - Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) - Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *IteratorTesting) First() { - t.init() - t.setAct(IterFirst) - - ok := t.Iter.First() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = 0 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Last() { - t.init() - t.setAct(IterLast) - - ok := t.Iter.Last() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = t.Len() - 1 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = 0 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Next() { - t.init() - t.setAct(IterNext) - - ok := t.Iter.Next() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos < t.Len()-1 { - t.Pos++ - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = t.Len() - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Prev() { - t.init() - t.setAct(IterPrev) - - ok := t.Iter.Prev() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos > 0 { - t.Pos-- - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Seek(i int) { - t.init() - t.setAct(IterSeek) - - key, _ := t.Index(i) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekInexact(i int) { - t.init() - t.setAct(IterSeek) - var key0 []byte - key1, _ := t.Index(i) - if i > 0 { - key0, _ = t.Index(i - 1) - } - key := BytesSeparator(key0, key1) - oldKey, _ := 
t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekKey(key []byte) { - t.init() - t.setAct(IterSeek) - oldKey, _ := t.IndexOrNil(t.Pos) - i := t.Search(key) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if i < t.Len() { - key_, _ := t.Index(i) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) - t.Pos = i - t.TestKV() - } else { - Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) - } - - t.Pos = i - t.post() -} - -func (t *IteratorTesting) SOI() { - t.init() - t.setAct(IterSOI) - Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) - for i := 0; i < 3; i++ { - t.Prev() - } - t.post() -} - -func (t *IteratorTesting) EOI() { - t.init() - t.setAct(IterEOI) - Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) - for i := 0; i < 3; i++ { - t.Next() - } - t.post() -} - -func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos > 0; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) - } -} - -func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) - } -} - -func (t *IteratorTesting) PrevAll() { - t.WalkPrev(func(t *IteratorTesting) { - t.Prev() - }) -} - -func (t *IteratorTesting) NextAll() { - t.WalkNext(func(t *IteratorTesting) { - t.Next() - }) -} - -func DoIteratorTesting(t *IteratorTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - t.SOI() - t.NextAll() - t.First() - t.SOI() - t.NextAll() - t.EOI() - t.PrevAll() - t.Last() - t.EOI() - t.PrevAll() - t.SOI() - - t.NextAll() - t.PrevAll() - t.NextAll() - t.Last() - t.PrevAll() - t.First() - t.NextAll() - t.EOI() - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.SeekInexact(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - if i%2 != 0 { - t.PrevAll() - t.SOI() - } else { - t.NextAll() - t.EOI() - } - }) - - for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { - t.SeekKey([]byte(key)) - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil/kv.go deleted file mode 100644 index 608cbf37..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/kv.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package testutil - -import ( - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -type KeyValueEntry struct { - key, value []byte -} - -type KeyValue struct { - entries []KeyValueEntry - nbytes int -} - -func (kv *KeyValue) Put(key, value []byte) { - if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { - panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) - } - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - kv.nbytes += len(key) + len(value) -} - -func (kv *KeyValue) PutString(key, value string) { - kv.Put([]byte(key), []byte(value)) -} - -func (kv *KeyValue) PutU(key, value []byte) bool { - if i, exist := kv.Get(key); !exist { - if i < kv.Len() { - kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) - kv.entries[i] = KeyValueEntry{key, value} - } else { - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - } - kv.nbytes += len(key) + len(value) - return true - } else { - kv.nbytes += len(value) - len(kv.ValueAt(i)) - kv.entries[i].value = value - } - return false -} - -func (kv *KeyValue) PutUString(key, value string) bool { - return kv.PutU([]byte(key), []byte(value)) -} - -func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { - i, exist := kv.Get(key) - if exist { - value = kv.entries[i].value - kv.DeleteIndex(i) - } - return -} - -func (kv *KeyValue) DeleteIndex(i int) bool { - if i < kv.Len() { - kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) - kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) - return true - } - return false -} - -func (kv KeyValue) Len() int { - return len(kv.entries) -} - -func (kv *KeyValue) Size() int { - return kv.nbytes -} - -func (kv KeyValue) KeyAt(i int) []byte { - return kv.entries[i].key -} - -func (kv KeyValue) ValueAt(i int) []byte { - return kv.entries[i].value -} - -func (kv KeyValue) Index(i int) (key, value []byte) { - if i < 0 || i >= len(kv.entries) { - panic(fmt.Sprintf("Index #%d: out of range", i)) - } - return kv.entries[i].key, kv.entries[i].value -} - -func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { - key, value = kv.Index(i) - var key0 []byte - var key1 = kv.KeyAt(i) - if i > 0 { - key0 = kv.KeyAt(i - 1) - } - key_ = BytesSeparator(key0, key1) - return -} - -func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { - if i >= 0 && i < len(kv.entries) { - return kv.entries[i].key, kv.entries[i].value - } - return nil, nil -} - -func (kv KeyValue) IndexString(i int) (key, value string) { - key_, _value := kv.Index(i) - return string(key_), string(_value) -} - -func (kv KeyValue) Search(key []byte) int { - return sort.Search(kv.Len(), func(i int) bool { - return cmp.Compare(kv.KeyAt(i), key) >= 0 - }) -} - -func (kv KeyValue) SearchString(key string) int { - return kv.Search([]byte(key)) -} - -func (kv KeyValue) Get(key []byte) (i int, exist bool) { - i = kv.Search(key) - if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { - exist = true - } - return -} - -func (kv KeyValue) GetString(key string) (i int, exist bool) { - return kv.Get([]byte(key)) -} - -func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { - for i, x := range kv.entries { - fn(i, x.key, x.value) - } -} - -func (kv KeyValue) IterateString(fn func(i int, key, value string)) { - kv.Iterate(func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) { - ShuffledIndex(rnd, kv.Len(), 
1, func(i int) { - fn(i, kv.entries[i].key, kv.entries[i].value) - }) -} - -func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { - kv.IterateShuffled(rnd, func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { - for i := range kv.entries { - key_, key, value := kv.IndexInexact(i) - fn(i, key_, key, value) - } -} - -func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { - kv.IterateInexact(func(i int, key_, key, value []byte) { - fn(i, string(key_), string(key), string(value)) - }) -} - -func (kv KeyValue) Clone() KeyValue { - return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} -} - -func (kv KeyValue) Slice(start, limit int) KeyValue { - if start < 0 || limit > kv.Len() { - panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) - } else if limit < start { - panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit)) - } - return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} -} - -func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { - start_ := 0 - limit_ := kv.Len() - if start != nil { - start_ = kv.Search(start) - } - if limit != nil { - limit_ = kv.Search(limit) - } - return kv.Slice(start_, limit_) -} - -func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { - return kv.SliceKey([]byte(start), []byte(limit)) -} - -func (kv KeyValue) SliceRange(r *util.Range) KeyValue { - if r != nil { - return kv.SliceKey(r.Start, r.Limit) - } - return kv.Clone() -} - -func (kv KeyValue) Range(start, limit int) (r util.Range) { - if kv.Len() > 0 { - if start == kv.Len() { - r.Start = BytesAfter(kv.KeyAt(start - 1)) - } else { - r.Start = kv.KeyAt(start) - } - } - if limit < kv.Len() { - r.Limit = kv.KeyAt(limit) - } - return -} - -func KeyValue_EmptyKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("", "v") - return kv -} - -func KeyValue_EmptyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "") - kv.PutString("abcd", "") - return kv -} - -func KeyValue_OneKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "v") - return kv -} - -func KeyValue_BigValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("big1", strings.Repeat("1", 200000)) - return kv -} - -func KeyValue_SpecialKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("\xff\xff", "v3") - return kv -} - -func KeyValue_MultipleKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("a", "v") - kv.PutString("aa", "v1") - kv.PutString("aaa", "v2") - kv.PutString("aaacccccccccc", "v2") - kv.PutString("aaaccccccccccd", "v3") - kv.PutString("aaaccccccccccf", "v4") - kv.PutString("aaaccccccccccfg", "v5") - kv.PutString("ab", "v6") - kv.PutString("abc", "v7") - kv.PutString("abcd", "v8") - kv.PutString("accccccccccccccc", "v9") - kv.PutString("b", "v10") - kv.PutString("bb", "v11") - kv.PutString("bc", "v12") - kv.PutString("c", "v13") - kv.PutString("c1", "v13") - kv.PutString("czzzzzzzzzzzzzz", "v14") - kv.PutString("fffffffffffffff", "v15") - kv.PutString("g11", "v15") - kv.PutString("g111", "v15") - kv.PutString("g111\xff", "v15") - kv.PutString("zz", "v16") - kv.PutString("zzzzzzz", "v16") - kv.PutString("zzzzzzzzzzzzzzzz", "v16") - return kv -} - -var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") - -func KeyValue_Generate(rnd *rand.Rand, n, incr, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { - if rnd == nil { - rnd = NewRand() - } - 
if maxlen < minlen { - panic("max len should >= min len") - } - - rrand := func(min, max int) int { - if min == max { - return max - } - return rnd.Intn(max-min) + min - } - - kv := &KeyValue{} - endC := byte(len(keymap) - incr) - gen := make([]byte, 0, maxlen) - for i := 0; i < n; i++ { - m := rrand(minlen, maxlen) - last := gen - retry: - gen = last[:m] - if k := len(last); m > k { - for j := k; j < m; j++ { - gen[j] = 0 - } - } else { - for j := m - 1; j >= 0; j-- { - c := last[j] - if c == endC { - continue - } - gen[j] = c + byte(incr) - for j++; j < m; j++ { - gen[j] = 0 - } - goto ok - } - if m < maxlen { - m++ - goto retry - } - panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) - ok: - } - key := make([]byte, m) - for j := 0; j < m; j++ { - key[j] = keymap[gen[j]] - } - value := make([]byte, rrand(vminlen, vmaxlen)) - for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { - value[n] = 'x' - } - kv.Put(key, value) - } - return kv -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go deleted file mode 100644 index f7563dc8..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func TestFind(db Find, kv KeyValue) { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for exact key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for exact key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for inexact key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key), "Key for inexact key %q (%q)", key_, key) - Expect(rvalue).Should(Equal(value), "Value for inexact key %q (%q)", key_, key) - }) -} - -func TestFindAfterLast(db Find, kv KeyValue) { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(errors.ErrNotFound)) -} - -func TestGet(db Get, kv KeyValue) { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) -} - -func TestHas(db Has, kv KeyValue) { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, _ := kv.IndexInexact(i) - - // Using exact key. - ret, err := db.TestHas(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(ret).Should(BeTrue(), "False for key %q", key) - - // Using inexact key. 
- if len(key_) > 0 { - ret, err = db.TestHas(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) - Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) - } - }) -} - -func TestIter(db NewIterator, r *util.Range, kv KeyValue) { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: kv, - Iter: iter, - } - - DoIteratorTesting(&t) - iter.Release() -} - -func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { - if rnd == nil { - rnd = NewRand() - } - - if p == nil { - BeforeEach(func() { - p = setup(kv) - }) - if teardown != nil { - AfterEach(func() { - teardown(p) - }) - } - } - - It("Should find all keys with Find", func() { - if db, ok := p.(Find); ok { - TestFind(db, kv) - } - }) - - It("Should return error if Find on key after the last", func() { - if db, ok := p.(Find); ok { - TestFindAfterLast(db, kv) - } - }) - - It("Should only find exact key with Get", func() { - if db, ok := p.(Get); ok { - TestGet(db, kv) - } - }) - - It("Should only find present key with Has", func() { - if db, ok := p.(Has); ok { - TestHas(db, kv) - } - }) - - It("Should iterates and seeks correctly", func(done Done) { - if db, ok := p.(NewIterator); ok { - TestIter(db, nil, kv.Clone()) - } - done <- true - }, 30.0) - - It("Should iterates and seeks slice correctly", func(done Done) { - if db, ok := p.(NewIterator); ok { - RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - By(fmt.Sprintf("Random index of %d .. %d", x.start, x.limit), func() { - TestIter(db, x.r, kv.Slice(x.start, x.limit)) - }) - } - }) - } - done <- true - }, 200.0) - - It("Should iterates and seeks slice correctly", func(done Done) { - if db, ok := p.(NewIterator); ok { - RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { - By(fmt.Sprintf("Random range of %d .. 
%d", start, limit), func() { - r := kv.Range(start, limit) - TestIter(db, &r, kv.Slice(start, limit)) - }) - }) - } - done <- true - }, 200.0) -} - -func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { - Test := func(kv *KeyValue) func() { - return func() { - var p DB - if setup != nil { - Defer("setup", func() { - p = setup(*kv) - }) - } - if teardown != nil { - Defer("teardown", func() { - teardown(p) - }) - } - if body != nil { - p = body(*kv) - } - KeyValueTesting(rnd, *kv, p, func(KeyValue) DB { - return p - }, nil) - } - } - - Describe("with no key/value (empty)", Test(&KeyValue{})) - Describe("with empty key", Test(KeyValue_EmptyKey())) - Describe("with empty value", Test(KeyValue_EmptyValue())) - Describe("with one key/value", Test(KeyValue_OneKeyValue())) - Describe("with big value", Test(KeyValue_BigValue())) - Describe("with special key", Test(KeyValue_SpecialKey())) - Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) - Describe("with generated key/value 2-incr", Test(KeyValue_Generate(nil, 120, 2, 1, 50, 10, 120))) - Describe("with generated key/value 3-incr", Test(KeyValue_Generate(nil, 120, 3, 1, 50, 10, 120))) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil/storage.go deleted file mode 100644 index 581daf31..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/storage.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "fmt" - "io" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - storageMu sync.Mutex - storageUseFS = true - storageKeepFS = false - storageNum int -) - -type StorageMode int - -const ( - ModeOpen StorageMode = 1 << iota - ModeCreate - ModeRemove - ModeRename - ModeRead - ModeWrite - ModeSync - ModeClose -) - -const ( - modeOpen = iota - modeCreate - modeRemove - modeRename - modeRead - modeWrite - modeSync - modeClose - - modeCount -) - -const ( - typeManifest = iota - typeJournal - typeTable - typeTemp - - typeCount -) - -const flattenCount = modeCount * typeCount - -func flattenType(m StorageMode, t storage.FileType) int { - var x int - switch m { - case ModeOpen: - x = modeOpen - case ModeCreate: - x = modeCreate - case ModeRemove: - x = modeRemove - case ModeRename: - x = modeRename - case ModeRead: - x = modeRead - case ModeWrite: - x = modeWrite - case ModeSync: - x = modeSync - case ModeClose: - x = modeClose - default: - panic("invalid storage mode") - } - x *= typeCount - switch t { - case storage.TypeManifest: - return x + typeManifest - case storage.TypeJournal: - return x + typeJournal - case storage.TypeTable: - return x + typeTable - case storage.TypeTemp: - return x + typeTemp - default: - panic("invalid file type") - } -} - -func listFlattenType(m StorageMode, t storage.FileType) []int { - ret := make([]int, 0, flattenCount) - add := func(x int) { - x *= typeCount - switch { - case t&storage.TypeManifest != 0: - ret = append(ret, x+typeManifest) - case t&storage.TypeJournal != 0: - ret = append(ret, x+typeJournal) - case t&storage.TypeTable != 0: - ret = append(ret, x+typeTable) - case t&storage.TypeTemp != 0: - ret = append(ret, x+typeTemp) - } - } - switch { - case m&ModeOpen != 0: - add(modeOpen) - case m&ModeCreate != 0: - add(modeCreate) - case m&ModeRemove != 0: - add(modeRemove) - case m&ModeRename != 0: - add(modeRename) - case m&ModeRead != 0: - add(modeRead) - case m&ModeWrite != 0: - add(modeWrite) - case m&ModeSync != 0: - add(modeSync) - case m&ModeClose != 0: - add(modeClose) - } - return ret -} - -func packFile(fd storage.FileDesc) uint64 { - if fd.Num>>(63-typeCount) != 0 { - panic("overflow") - } - return uint64(fd.Num<> typeCount)} -} - -type emulatedError struct { - err error -} - -func (err emulatedError) Error() string { - return fmt.Sprintf("emulated storage error: %v", err.err) -} - -type storageLock struct { - s *Storage - l storage.Locker -} - -func (l storageLock) Unlock() { - l.l.Unlock() - l.s.logI("storage lock released") -} - -type reader struct { - s *Storage - fd storage.FileDesc - storage.Reader -} - -func (r *reader) Read(p []byte) (n int, err error) { - err = r.s.emulateError(ModeRead, r.fd.Type) - if err == nil { - r.s.stall(ModeRead, r.fd.Type) - n, err = r.Reader.Read(p) - } - r.s.count(ModeRead, r.fd.Type, n) - if err != nil && err != io.EOF { - r.s.logI("read error, fd=%s n=%d err=%v", r.fd, n, err) - } - return -} - -func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.s.emulateError(ModeRead, r.fd.Type) - if err == nil { - r.s.stall(ModeRead, r.fd.Type) - n, err = r.Reader.ReadAt(p, off) - } - r.s.count(ModeRead, r.fd.Type, n) - if err != nil && err != io.EOF { - r.s.logI("readAt error, fd=%s offset=%d n=%d err=%v", r.fd, off, n, err) - } - return -} - -func (r *reader) Close() (err error) { - return r.s.fileClose(r.fd, r.Reader) -} - -type writer struct { - s *Storage - fd storage.FileDesc - storage.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - err = 
w.s.emulateError(ModeWrite, w.fd.Type) - if err == nil { - w.s.stall(ModeWrite, w.fd.Type) - n, err = w.Writer.Write(p) - } - w.s.count(ModeWrite, w.fd.Type, n) - if err != nil && err != io.EOF { - w.s.logI("write error, fd=%s n=%d err=%v", w.fd, n, err) - } - return -} - -func (w *writer) Sync() (err error) { - err = w.s.emulateError(ModeSync, w.fd.Type) - if err == nil { - w.s.stall(ModeSync, w.fd.Type) - err = w.Writer.Sync() - } - w.s.count(ModeSync, w.fd.Type, 0) - if err != nil { - w.s.logI("sync error, fd=%s err=%v", w.fd, err) - } - return -} - -func (w *writer) Close() (err error) { - return w.s.fileClose(w.fd, w.Writer) -} - -type Storage struct { - storage.Storage - path string - onClose func() (preserve bool, err error) - onLog func(str string) - - lmu sync.Mutex - lb bytes.Buffer - - mu sync.Mutex - rand *rand.Rand - // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - emulatedErrorOnce [flattenCount]bool - emulatedRandomError [flattenCount]error - emulatedRandomErrorProb [flattenCount]float64 - stallCond sync.Cond - stalled [flattenCount]bool -} - -func (s *Storage) log(skip int, str string) { - s.lmu.Lock() - defer s.lmu.Unlock() - _, file, line, ok := runtime.Caller(skip + 2) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" - line = 1 - } - fmt.Fprintf(&s.lb, "%s:%d: ", file, line) - lines := strings.Split(str, "\n") - if l := len(lines); l > 1 && lines[l-1] == "" { - lines = lines[:l-1] - } - for i, line := range lines { - if i > 0 { - s.lb.WriteString("\n\t") - } - s.lb.WriteString(line) - } - if s.onLog != nil { - s.onLog(s.lb.String()) - s.lb.Reset() - } else { - s.lb.WriteByte('\n') - } -} - -func (s *Storage) logISkip(skip int, format string, args ...interface{}) { - pc, _, _, ok := runtime.Caller(skip + 1) - if ok { - if f := runtime.FuncForPC(pc); f != nil { - fname := f.Name() - if index := strings.LastIndex(fname, "."); index >= 0 { - fname = fname[index+1:] - } - format = fname + ": " + format - } - } - s.log(skip+1, fmt.Sprintf(format, args...)) -} - -func (s *Storage) logI(format string, args ...interface{}) { - s.logISkip(1, format, args...) 
-} - -func (s *Storage) OnLog(onLog func(log string)) { - s.lmu.Lock() - s.onLog = onLog - if s.lb.Len() != 0 { - log := s.lb.String() - s.onLog(log[:len(log)-1]) - s.lb.Reset() - } - s.lmu.Unlock() -} - -func (s *Storage) Log(str string) { - s.log(1, "Log: "+str) - s.Storage.Log(str) -} - -func (s *Storage) Lock() (l storage.Locker, err error) { - l, err = s.Storage.Lock() - if err != nil { - s.logI("storage locking failed, err=%v", err) - } else { - s.logI("storage locked") - l = storageLock{s, l} - } - return -} - -func (s *Storage) List(t storage.FileType) (fds []storage.FileDesc, err error) { - fds, err = s.Storage.List(t) - if err != nil { - s.logI("list failed, err=%v", err) - return - } - s.logI("list, type=0x%x count=%d", int(t), len(fds)) - return -} - -func (s *Storage) GetMeta() (fd storage.FileDesc, err error) { - fd, err = s.Storage.GetMeta() - if err != nil { - if !os.IsNotExist(err) { - s.logI("get meta failed, err=%v", err) - } - return - } - s.logI("get meta, fd=%s", fd) - return -} - -func (s *Storage) SetMeta(fd storage.FileDesc) error { - ExpectWithOffset(1, fd.Type).To(Equal(storage.TypeManifest)) - err := s.Storage.SetMeta(fd) - if err != nil { - s.logI("set meta failed, fd=%s err=%v", fd, err) - } else { - s.logI("set meta, fd=%s", fd) - } - return err -} - -func (s *Storage) fileClose(fd storage.FileDesc, closer io.Closer) (err error) { - err = s.emulateError(ModeClose, fd.Type) - if err == nil { - s.stall(ModeClose, fd.Type) - } - x := packFile(fd) - s.mu.Lock() - defer s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, s.opens).To(HaveKey(x), "File closed, fd=%s", fd) - err = closer.Close() - } - s.countNB(ModeClose, fd.Type, 0) - writer := s.opens[x] - if err != nil { - s.logISkip(1, "file close failed, fd=%s writer=%v err=%v", fd, writer, err) - } else { - s.logISkip(1, "file closed, fd=%s writer=%v", fd, writer) - delete(s.opens, x) - } - return -} - -func (s *Storage) assertOpen(fd storage.FileDesc) { - x := packFile(fd) - ExpectWithOffset(2, s.opens).NotTo(HaveKey(x), "File open, fd=%s writer=%v", fd, s.opens[x]) -} - -func (s *Storage) Open(fd storage.FileDesc) (r storage.Reader, err error) { - err = s.emulateError(ModeOpen, fd.Type) - if err == nil { - s.stall(ModeOpen, fd.Type) - } - s.mu.Lock() - defer s.mu.Unlock() - if err == nil { - s.assertOpen(fd) - s.countNB(ModeOpen, fd.Type, 0) - r, err = s.Storage.Open(fd) - } - if err != nil { - s.logI("file open failed, fd=%s err=%v", fd, err) - } else { - s.logI("file opened, fd=%s", fd) - s.opens[packFile(fd)] = false - r = &reader{s, fd, r} - } - return -} - -func (s *Storage) Create(fd storage.FileDesc) (w storage.Writer, err error) { - err = s.emulateError(ModeCreate, fd.Type) - if err == nil { - s.stall(ModeCreate, fd.Type) - } - s.mu.Lock() - defer s.mu.Unlock() - if err == nil { - s.assertOpen(fd) - s.countNB(ModeCreate, fd.Type, 0) - w, err = s.Storage.Create(fd) - } - if err != nil { - s.logI("file create failed, fd=%s err=%v", fd, err) - } else { - s.logI("file created, fd=%s", fd) - s.opens[packFile(fd)] = true - w = &writer{s, fd, w} - } - return -} - -func (s *Storage) Remove(fd storage.FileDesc) (err error) { - err = s.emulateError(ModeRemove, fd.Type) - if err == nil { - s.stall(ModeRemove, fd.Type) - } - s.mu.Lock() - defer s.mu.Unlock() - if err == nil { - s.assertOpen(fd) - s.countNB(ModeRemove, fd.Type, 0) - err = s.Storage.Remove(fd) - } - if err != nil { - s.logI("file remove failed, fd=%s err=%v", fd, err) - } else { - s.logI("file removed, fd=%s", fd) - } - return -} - -func (s 
*Storage) ForceRemove(fd storage.FileDesc) (err error) { - s.countNB(ModeRemove, fd.Type, 0) - if err = s.Storage.Remove(fd); err != nil { - s.logI("file remove failed (forced), fd=%s err=%v", fd, err) - } else { - s.logI("file removed (forced), fd=%s", fd) - } - return -} - -func (s *Storage) Rename(oldfd, newfd storage.FileDesc) (err error) { - err = s.emulateError(ModeRename, oldfd.Type) - if err == nil { - s.stall(ModeRename, oldfd.Type) - } - s.mu.Lock() - defer s.mu.Unlock() - if err == nil { - s.assertOpen(oldfd) - s.assertOpen(newfd) - s.countNB(ModeRename, oldfd.Type, 0) - err = s.Storage.Rename(oldfd, newfd) - } - if err != nil { - s.logI("file rename failed, oldfd=%s newfd=%s err=%v", oldfd, newfd, err) - } else { - s.logI("file renamed, oldfd=%s newfd=%s", oldfd, newfd) - } - return -} - -func (s *Storage) ForceRename(oldfd, newfd storage.FileDesc) (err error) { - s.countNB(ModeRename, oldfd.Type, 0) - if err = s.Storage.Rename(oldfd, newfd); err != nil { - s.logI("file rename failed (forced), oldfd=%s newfd=%s err=%v", oldfd, newfd, err) - } else { - s.logI("file renamed (forced), oldfd=%s newfd=%s", oldfd, newfd) - } - return -} - -func (s *Storage) openFiles() string { - out := "Open files:" - for x, writer := range s.opens { - fd := unpackFile(x) - out += fmt.Sprintf("\n · fd=%s writer=%v", fd, writer) - } - return out -} - -func (s *Storage) CloseCheck() { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) -} - -func (s *Storage) OnClose(onClose func() (preserve bool, err error)) { - s.mu.Lock() - s.onClose = onClose - s.mu.Unlock() -} - -func (s *Storage) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) - err := s.Storage.Close() - if err != nil { - s.logI("storage closing failed, err=%v", err) - } else { - s.logI("storage closed") - } - var preserve bool - if s.onClose != nil { - var err0 error - if preserve, err0 = s.onClose(); err0 != nil { - s.logI("onClose error, err=%v", err0) - } - } - if s.path != "" { - if storageKeepFS || preserve { - s.logI("storage is preserved, path=%v", s.path) - } else { - if err1 := os.RemoveAll(s.path); err1 != nil { - s.logI("cannot remove storage, err=%v", err1) - } else { - s.logI("storage has been removed") - } - } - } - return err -} - -func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { - s.counters[flattenType(m, t)]++ - s.bytesCounter[flattenType(m, t)] += int64(n) -} - -func (s *Storage) count(m StorageMode, t storage.FileType, n int) { - s.mu.Lock() - defer s.mu.Unlock() - s.countNB(m, t, n) -} - -func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { - for _, x := range listFlattenType(m, t) { - s.counters[x] = 0 - s.bytesCounter[x] = 0 - } -} - -func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { - for _, x := range listFlattenType(m, t) { - count += s.counters[x] - bytes += s.bytesCounter[x] - } - return -} - -func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { - s.mu.Lock() - defer s.mu.Unlock() - x := flattenType(m, t) - if err := s.emulatedError[x]; err != nil { - if s.emulatedErrorOnce[x] { - s.emulatedError[x] = nil - } - return emulatedError{err} - } - if err := s.emulatedRandomError[x]; err != nil && s.rand.Float64() < s.emulatedRandomErrorProb[x] { - return emulatedError{err} - } - return nil -} - -func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := 
range listFlattenType(m, t) { - s.emulatedError[x] = err - s.emulatedErrorOnce[x] = false - } -} - -func (s *Storage) EmulateErrorOnce(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedError[x] = err - s.emulatedErrorOnce[x] = true - } -} - -func (s *Storage) EmulateRandomError(m StorageMode, t storage.FileType, prob float64, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedRandomError[x] = err - s.emulatedRandomErrorProb[x] = prob - } -} - -func (s *Storage) stall(m StorageMode, t storage.FileType) { - x := flattenType(m, t) - s.mu.Lock() - defer s.mu.Unlock() - for s.stalled[x] { - s.stallCond.Wait() - } -} - -func (s *Storage) Stall(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = true - } -} - -func (s *Storage) Release(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = false - } - s.stallCond.Broadcast() -} - -func NewStorage() *Storage { - var ( - stor storage.Storage - path string - ) - if storageUseFS { - for { - storageMu.Lock() - num := storageNum - storageNum++ - storageMu.Unlock() - path = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path, false) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - break - } - } - } else { - stor = storage.NewMemStorage() - } - s := &Storage{ - Storage: stor, - path: path, - rand: NewRand(), - opens: make(map[uint64]bool), - } - s.stallCond.L = &s.mu - if s.path != "" { - s.logI("using FS storage") - s.logI("storage path: %s", s.path) - } else { - s.logI("using MEM storage") - } - return s -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil/util.go deleted file mode 100644 index 97c5294b..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil/util.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "flag" - "math/rand" - "reflect" - "sync" - - "github.com/onsi/ginkgo/config" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var ( - runfn = make(map[string][]func()) - runmu sync.Mutex -) - -func Defer(args ...interface{}) bool { - var ( - group string - fn func() - ) - for _, arg := range args { - v := reflect.ValueOf(arg) - switch v.Kind() { - case reflect.String: - group = v.String() - case reflect.Func: - r := reflect.ValueOf(&fn).Elem() - r.Set(v) - } - } - if fn != nil { - runmu.Lock() - runfn[group] = append(runfn[group], fn) - runmu.Unlock() - } - return true -} - -func RunDefer(groups ...string) bool { - if len(groups) == 0 { - groups = append(groups, "") - } - runmu.Lock() - var runfn_ []func() - for _, group := range groups { - runfn_ = append(runfn_, runfn[group]...) 
- delete(runfn, group) - } - runmu.Unlock() - for _, fn := range runfn_ { - fn() - } - return runfn_ != nil -} - -func RandomSeed() int64 { - if !flag.Parsed() { - panic("random seed not initialized") - } - return config.GinkgoConfig.RandomSeed -} - -func NewRand() *rand.Rand { - return rand.New(rand.NewSource(RandomSeed())) -} - -var cmp = comparer.DefaultComparer - -func BytesSeparator(a, b []byte) []byte { - if bytes.Equal(a, b) { - return b - } - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && (a[i] == b[i]); i++ { - } - x := append([]byte{}, a[:i]...) - if i < n { - if c := a[i] + 1; c < b[i] { - return append(x, c) - } - x = append(x, a[i]) - i++ - } - for ; i < len(a); i++ { - if c := a[i]; c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - if len(b) > i && b[i] > 0 { - return append(x, b[i]-1) - } - return append(x, 'x') -} - -func BytesAfter(b []byte) []byte { - var x []byte - for _, c := range b { - if c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - return append(x, 'x') -} - -func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - fn(rnd.Intn(n)) - } - return -} - -func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - for _, i := range rnd.Perm(n) { - fn(i) - } - } - return -} - -func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - start := rnd.Intn(n) - length := 0 - if j := n - start; j > 0 { - length = rnd.Intn(j) - } - fn(start, start+length) - } - return -} - -func Max(x, y int) int { - if x > y { - return x - } - return y -} - -func Min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go deleted file mode 100644 index c8cb44c4..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type testingDB struct { - *DB - ro *opt.ReadOptions - wo *opt.WriteOptions - stor *testutil.Storage -} - -func (t *testingDB) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingDB) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingDB) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingDB) TestHas(key []byte) (ret bool, err error) { - return t.Has(key, t.ro) -} - -func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingDB) TestClose() { - err := t.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - err = t.stor.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) -} - -func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { - stor := testutil.NewStorage() - db, err := Open(stor, o) - // FIXME: This may be called from outside It, which may cause panic. - Expect(err).NotTo(HaveOccurred()) - return &testingDB{ - DB: db, - ro: ro, - wo: wo, - stor: stor, - } -} - -type testingTransaction struct { - *Transaction - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func (t *testingTransaction) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingTransaction) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingTransaction) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingTransaction) TestHas(key []byte) (ret bool, err error) { - return t.Has(key, t.ro) -} - -func (t *testingTransaction) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingTransaction) TestClose() {} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go index e572a329..0e2b519e 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go @@ -20,7 +20,7 @@ func shorten(str string) string { return str[:3] + ".." + str[len(str)-3:] } -var bunits = [...]string{"", "Ki", "Mi", "Gi"} +var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"} func shortenb(bytes int) string { i := 0 diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go deleted file mode 100644 index 87d96739..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "io" - "math/rand" - "runtime" - "testing" -) - -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. - -func init() { - testBytes = make([]byte, N) - for i := 0; i < N; i++ { - testBytes[i] = 'a' + byte(i%26) - } - data = string(testBytes) -} - -// Verify that contents of buf match the string s. 
-func check(t *testing.T, testname string, buf *Buffer, s string) { - bytes := buf.Bytes() - str := buf.String() - if buf.Len() != len(bytes) { - t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) - } - - if buf.Len() != len(str) { - t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) - } - - if buf.Len() != len(s) { - t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) - } - - if string(bytes) != s { - t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) - } -} - -// Fill buf through n writes of byte slice fub. -// The initial contents of buf corresponds to the string s; -// the result is the final contents of buf returned as a string. -func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { - check(t, testname+" (fill 1)", buf, s) - for ; n > 0; n-- { - m, err := buf.Write(fub) - if m != len(fub) { - t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) - } - if err != nil { - t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) - } - s += string(fub) - check(t, testname+" (fill 4)", buf, s) - } - return s -} - -func TestNewBuffer(t *testing.T) { - buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) -} - -// Empty buf through repeated reads into fub. -// The initial contents of buf corresponds to the string s. -func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { - check(t, testname+" (empty 1)", buf, s) - - for { - n, err := buf.Read(fub) - if n == 0 { - break - } - if err != nil { - t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) - } - s = s[n:] - check(t, testname+" (empty 3)", buf, s) - } - - check(t, testname+" (empty 4)", buf, "") -} - -func TestBasicOperations(t *testing.T) { - var buf Buffer - - for i := 0; i < 5; i++ { - check(t, "TestBasicOperations (1)", &buf, "") - - buf.Reset() - check(t, "TestBasicOperations (2)", &buf, "") - - buf.Truncate(0) - check(t, "TestBasicOperations (3)", &buf, "") - - n, err := buf.Write([]byte(data[0:1])) - if n != 1 { - t.Errorf("wrote 1 byte, but n == %d", n) - } - if err != nil { - t.Errorf("err should always be nil, but err == %s", err) - } - check(t, "TestBasicOperations (4)", &buf, "a") - - buf.WriteByte(data[1]) - check(t, "TestBasicOperations (5)", &buf, "ab") - - n, err = buf.Write([]byte(data[2:26])) - if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) - } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) - - buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) - - buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) - - empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) - empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - - buf.WriteByte(data[1]) - c, err := buf.ReadByte() - if err != nil { - t.Error("ReadByte unexpected eof") - } - if c != data[1] { - t.Errorf("ReadByte wrong value c=%v", c) - } - c, err = buf.ReadByte() - if err == nil { - t.Error("ReadByte unexpected not eof") - } - } -} - -func TestLargeByteWrites(t *testing.T) { - var buf Buffer - limit := 30 - if testing.Short() { - limit = 9 - } - for i := 3; i < limit; i += 3 { - s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) - } - check(t, "TestLargeByteWrites (3)", &buf, "") -} - -func 
TestLargeByteReads(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) - } - check(t, "TestLargeByteReads (3)", &buf, "") -} - -func TestMixedReadsAndWrites(t *testing.T) { - var buf Buffer - s := "" - for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) - s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) - rlen := rand.Intn(len(data)) - fub := make([]byte, rlen) - n, _ := buf.Read(fub) - s = s[n:] - } - empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) -} - -func TestNil(t *testing.T) { - var b *Buffer - if b.String() != "" { - t.Errorf("expected ; got %q", b.String()) - } -} - -func TestReadFrom(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - b.ReadFrom(&buf) - empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) - } -} - -func TestWriteTo(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) - } -} - -func TestNext(t *testing.T) { - b := []byte{0, 1, 2, 3, 4} - tmp := make([]byte, 5) - for i := 0; i <= 5; i++ { - for j := i; j <= 5; j++ { - for k := 0; k <= 6; k++ { - // 0 <= i <= j <= 5; 0 <= k <= 6 - // Check that if we start with a buffer - // of length j at offset i and ask for - // Next(k), we get the right bytes. - buf := NewBuffer(b[0:j]) - n, _ := buf.Read(tmp[0:i]) - if n != i { - t.Fatalf("Read %d returned %d", i, n) - } - bb := buf.Next(k) - want := k - if want > j-i { - want = j - i - } - if len(bb) != want { - t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) - } - for l, v := range bb { - if v != byte(l+i) { - t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) - } - } - } - } - } -} - -var readBytesTests = []struct { - buffer string - delim byte - expected []string - err error -}{ - {"", 0, []string{""}, io.EOF}, - {"a\x00", 0, []string{"a\x00"}, nil}, - {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, - {"hello\x01world", 1, []string{"hello\x01"}, nil}, - {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, - {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, - {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, -} - -func TestReadBytes(t *testing.T) { - for _, test := range readBytesTests { - buf := NewBuffer([]byte(test.buffer)) - var err error - for _, expected := range test.expected { - var bytes []byte - bytes, err = buf.ReadBytes(test.delim) - if string(bytes) != expected { - t.Errorf("expected %q, got %q", expected, bytes) - } - if err != nil { - break - } - } - if err != test.err { - t.Errorf("expected error %v, got %v", test.err, err) - } - } -} - -func TestGrow(t *testing.T) { - x := []byte{'x'} - y := []byte{'y'} - tmp := make([]byte, 72) - for _, startLen := range []int{0, 100, 1000, 10000, 100000} { - xBytes := bytes.Repeat(x, startLen) - for _, growLen := range []int{0, 100, 1000, 10000, 100000} { - buf := NewBuffer(xBytes) - // If we read, this affects buf.off, which is good to test. - readBytes, _ := buf.Read(tmp) - buf.Grow(growLen) - yBytes := bytes.Repeat(y, growLen) - // Check no allocation occurs in write, as long as we're single-threaded. 
- var m1, m2 runtime.MemStats - runtime.ReadMemStats(&m1) - buf.Write(yBytes) - runtime.ReadMemStats(&m2) - if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { - t.Errorf("allocation occurred during write") - } - // Check that buffer has correct data. - if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { - t.Errorf("bad initial data at %d %d", startLen, growLen) - } - if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { - t.Errorf("bad written data at %d %d", startLen, growLen) - } - } - } -} - -// Was a bug: used to give EOF reading empty slice at EOF. -func TestReadEmptyAtEOF(t *testing.T) { - b := new(Buffer) - slice := make([]byte, 0) - n, err := b.Read(slice) - if err != nil { - t.Errorf("read error: %v", err) - } - if n != 0 { - t.Errorf("wrong count; got %d want 0", n) - } -} - -// Tests that we occasionally compact. Issue 5154. -func TestBufferGrowth(t *testing.T) { - var b Buffer - buf := make([]byte, 1024) - b.Write(buf[0:1]) - var cap0 int - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - if i == 0 { - cap0 = cap(b.buf) - } - } - cap1 := cap(b.buf) - // (*Buffer).grow allows for 2x capacity slop before sliding, - // so set our error threshold at 3x. - if cap1 > cap0*3 { - t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) - } -} - -// From Issue 5154. -func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf[0:1]) - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - } - } -} - -// Check that we don't compact too often. From Issue 5154. -func BenchmarkBufferFullSmallReads(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf) - for b.Len()+20 < cap(b.buf) { - b.Write(buf[:10]) - } - for i := 0; i < 5<<10; i++ { - b.Read(buf[:1]) - b.Write(buf[:1]) - } - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go deleted file mode 100644 index a35d273e..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package util - -import ( - "testing" -) - -var hashTests = []struct { - data []byte - seed uint32 - hash uint32 -}{ - {nil, 0xbc9f1d34, 0xbc9f1d34}, - {[]byte{0x62}, 0xbc9f1d34, 0xef1345c4}, - {[]byte{0xc3, 0x97}, 0xbc9f1d34, 0x5b663814}, - {[]byte{0xe2, 0x99, 0xa5}, 0xbc9f1d34, 0x323c078f}, - {[]byte{0xe1, 0x80, 0xb9, 0x32}, 0xbc9f1d34, 0xed21633a}, - {[]byte{ - 0x01, 0xc0, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x00, 0x14, - 0x00, 0x00, 0x00, 0x18, - 0x28, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - }, 0x12345678, 0xf333dabb}, -} - -func TestHash(t *testing.T) { - for i, x := range hashTests { - h := Hash(x.data, x.seed) - if h != x.hash { - t.Fatalf("test-%d: invalid hash, %#x vs %#x", i, h, x.hash) - } - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go index f3597686..80614afc 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go @@ -19,7 +19,7 @@ var ( // Releaser is the interface that wraps the basic Release method. type Releaser interface { // Release releases associated resources. Release should always success - // and can be called multipe times without causing error. + // and can be called multiple times without causing error. Release() } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/version_test.go deleted file mode 100644 index a643be10..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/version_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package leveldb - -import ( - "encoding/binary" - "reflect" - "testing" - - "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -type testFileRec struct { - level int - num int64 -} - -func TestVersionStaging(t *testing.T) { - gomega.RegisterTestingT(t) - stor := testutil.NewStorage() - defer stor.Close() - s, err := newSession(stor, nil) - if err != nil { - t.Fatal(err) - } - - v := newVersion(s) - v.newStaging() - - tmp := make([]byte, 4) - mik := func(i uint64) []byte { - binary.BigEndian.PutUint32(tmp, uint32(i)) - return []byte(makeInternalKey(nil, tmp, 0, keyTypeVal)) - } - - for i, x := range []struct { - add, del []testFileRec - levels [][]int64 - }{ - { - add: []testFileRec{ - {1, 1}, - }, - levels: [][]int64{ - {}, - {1}, - }, - }, - { - add: []testFileRec{ - {1, 1}, - }, - levels: [][]int64{ - {}, - {1}, - }, - }, - { - del: []testFileRec{ - {1, 1}, - }, - levels: [][]int64{}, - }, - { - add: []testFileRec{ - {0, 1}, - {0, 3}, - {0, 2}, - {2, 5}, - {1, 4}, - }, - levels: [][]int64{ - {3, 2, 1}, - {4}, - {5}, - }, - }, - { - add: []testFileRec{ - {1, 6}, - {2, 5}, - }, - del: []testFileRec{ - {0, 1}, - {0, 4}, - }, - levels: [][]int64{ - {3, 2}, - {4, 6}, - {5}, - }, - }, - { - del: []testFileRec{ - {0, 3}, - {0, 2}, - {1, 4}, - {1, 6}, - {2, 5}, - }, - levels: [][]int64{}, - }, - { - add: []testFileRec{ - {0, 1}, - }, - levels: [][]int64{ - {1}, - }, - }, - { - add: []testFileRec{ - {1, 2}, - }, - levels: [][]int64{ - {1}, - {2}, - }, - }, - { - add: []testFileRec{ - {0, 3}, - }, - levels: [][]int64{ - {3, 1}, - {2}, - }, - }, - { - add: []testFileRec{ - {6, 9}, - }, - levels: [][]int64{ - {3, 1}, - {2}, - {}, - {}, - {}, - {}, - {9}, - }, - }, - { - del: []testFileRec{ - {6, 9}, - }, - levels: 
[][]int64{ - {3, 1}, - {2}, - }, - }, - } { - rec := &sessionRecord{} - for _, f := range x.add { - ik := mik(uint64(f.num)) - rec.addTable(f.level, f.num, 1, ik, ik) - } - for _, f := range x.del { - rec.delTable(f.level, f.num) - } - vs := v.newStaging() - vs.commit(rec) - v = vs.finish() - if len(v.levels) != len(x.levels) { - t.Fatalf("#%d: invalid level count: want=%d got=%d", i, len(x.levels), len(v.levels)) - } - for j, want := range x.levels { - tables := v.levels[j] - if len(want) != len(tables) { - t.Fatalf("#%d.%d: invalid tables count: want=%d got=%d", i, j, len(want), len(tables)) - } - got := make([]int64, len(tables)) - for k, t := range tables { - got[k] = t.fd.Num - } - if !reflect.DeepEqual(want, got) { - t.Fatalf("#%d.%d: invalid tables: want=%v got=%v", i, j, want, got) - } - } - } -} diff --git a/vendor/github.com/syndtr/goleveldb/manualtest/dbstress/key.go b/vendor/github.com/syndtr/goleveldb/manualtest/dbstress/key.go deleted file mode 100644 index c9f69638..00000000 --- a/vendor/github.com/syndtr/goleveldb/manualtest/dbstress/key.go +++ /dev/null @@ -1,137 +0,0 @@ -package main - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -type ErrIkeyCorrupted struct { - Ikey []byte - Reason string -} - -func (e *ErrIkeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason) -} - -func newErrIkeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(storage.FileDesc{}, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) -} - -type kType int - -func (kt kType) String() string { - switch kt { - case ktDel: - return "d" - case ktVal: - return "v" - } - return "x" -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. -const ( - ktDel kType = iota - ktVal -) - -// ktSeek defines the kType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const ktSeek = ktVal - -const ( - // Maximum value possible for sequence number; the 8-bits are - // used by value type, so its can packed together in single - // 64-bit integer. - kMaxSeq uint64 = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek) -) - -// Maximum number encoded in bytes. 
-var kMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) -} - -type iKey []byte - -func newIkey(ukey []byte, seq uint64, kt kType) iKey { - if seq > kMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > ktVal { - panic("leveldb: invalid type") - } - - ik := make(iKey, len(ukey)+8) - copy(ik, ukey) - binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt)) - return ik -} - -func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validIkey(ik []byte) bool { - _, _, _, err := parseIkey(ik) - return err == nil -} - -func (ik iKey) assert() { - if ik == nil { - panic("leveldb: nil iKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", ik, len(ik))) - } -} - -func (ik iKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik iKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik iKey) parseNum() (seq uint64, kt kType) { - num := ik.num() - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", ik, len(ik), kt)) - } - return -} - -func (ik iKey) String() string { - if ik == nil { - return "" - } - - if ukey, seq, kt, err := parseIkey(ik); err == nil { - return fmt.Sprintf("%x,%s%d", ukey, kt, seq) - } else { - return "" - } -} diff --git a/vendor/github.com/syndtr/goleveldb/manualtest/dbstress/main.go b/vendor/github.com/syndtr/goleveldb/manualtest/dbstress/main.go deleted file mode 100644 index 2886475f..00000000 --- a/vendor/github.com/syndtr/goleveldb/manualtest/dbstress/main.go +++ /dev/null @@ -1,628 +0,0 @@ -package main - -import ( - "crypto/rand" - "encoding/binary" - "flag" - "fmt" - "log" - mrand "math/rand" - "net/http" - _ "net/http/pprof" - "os" - "os/signal" - "path" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - dbPath = path.Join(os.TempDir(), "goleveldb-testdb") - openFilesCacheCapacity = 500 - keyLen = 63 - valueLen = 256 - numKeys = arrayInt{100000, 1332, 531, 1234, 9553, 1024, 35743} - httpProf = "127.0.0.1:5454" - transactionProb = 0.5 - enableBlockCache = false - enableCompression = false - enableBufferPool = false - - wg = new(sync.WaitGroup) - done, fail uint32 - - bpool *util.BufferPool -) - -type arrayInt []int - -func (a arrayInt) String() string { - var str string - for i, n := range a { - if i > 0 { - str += "," - } - str += strconv.Itoa(n) - } - return str -} - -func (a *arrayInt) Set(str string) error { - var na arrayInt - for _, s := range strings.Split(str, ",") { - s = strings.TrimSpace(s) - if s != "" { - n, err := strconv.Atoi(s) - if err != nil { - return err - } - na = append(na, n) - } - } - *a = na - return nil -} - -func init() { - flag.StringVar(&dbPath, "db", dbPath, "testdb path") - flag.IntVar(&openFilesCacheCapacity, "openfilescachecap", openFilesCacheCapacity, "open files cache 
capacity") - flag.IntVar(&keyLen, "keylen", keyLen, "key length") - flag.IntVar(&valueLen, "valuelen", valueLen, "value length") - flag.Var(&numKeys, "numkeys", "num keys") - flag.StringVar(&httpProf, "httpprof", httpProf, "http pprof listen addr") - flag.Float64Var(&transactionProb, "transactionprob", transactionProb, "probablity of writes using transaction") - flag.BoolVar(&enableBufferPool, "enablebufferpool", enableBufferPool, "enable buffer pool") - flag.BoolVar(&enableBlockCache, "enableblockcache", enableBlockCache, "enable block cache") - flag.BoolVar(&enableCompression, "enablecompression", enableCompression, "enable block compression") -} - -func randomData(dst []byte, ns, prefix byte, i uint32, dataLen int) []byte { - if dataLen < (2+4+4)*2+4 { - panic("dataLen is too small") - } - if cap(dst) < dataLen { - dst = make([]byte, dataLen) - } else { - dst = dst[:dataLen] - } - half := (dataLen - 4) / 2 - if _, err := rand.Reader.Read(dst[2 : half-8]); err != nil { - panic(err) - } - dst[0] = ns - dst[1] = prefix - binary.LittleEndian.PutUint32(dst[half-8:], i) - binary.LittleEndian.PutUint32(dst[half-8:], i) - binary.LittleEndian.PutUint32(dst[half-4:], util.NewCRC(dst[:half-4]).Value()) - full := half * 2 - copy(dst[half:full], dst[:half]) - if full < dataLen-4 { - if _, err := rand.Reader.Read(dst[full : dataLen-4]); err != nil { - panic(err) - } - } - binary.LittleEndian.PutUint32(dst[dataLen-4:], util.NewCRC(dst[:dataLen-4]).Value()) - return dst -} - -func dataSplit(data []byte) (data0, data1 []byte) { - n := (len(data) - 4) / 2 - return data[:n], data[n : n+n] -} - -func dataNS(data []byte) byte { - return data[0] -} - -func dataPrefix(data []byte) byte { - return data[1] -} - -func dataI(data []byte) uint32 { - return binary.LittleEndian.Uint32(data[(len(data)-4)/2-8:]) -} - -func dataChecksum(data []byte) (uint32, uint32) { - checksum0 := binary.LittleEndian.Uint32(data[len(data)-4:]) - checksum1 := util.NewCRC(data[:len(data)-4]).Value() - return checksum0, checksum1 -} - -func dataPrefixSlice(ns, prefix byte) *util.Range { - return util.BytesPrefix([]byte{ns, prefix}) -} - -func dataNsSlice(ns byte) *util.Range { - return util.BytesPrefix([]byte{ns}) -} - -type testingStorage struct { - storage.Storage -} - -func (ts *testingStorage) scanTable(fd storage.FileDesc, checksum bool) (corrupted bool) { - r, err := ts.Open(fd) - if err != nil { - log.Fatal(err) - } - defer r.Close() - - size, err := r.Seek(0, os.SEEK_END) - if err != nil { - log.Fatal(err) - } - - o := &opt.Options{ - DisableLargeBatchTransaction: true, - Strict: opt.NoStrict, - } - if checksum { - o.Strict = opt.StrictBlockChecksum | opt.StrictReader - } - tr, err := table.NewReader(r, size, fd, nil, bpool, o) - if err != nil { - log.Fatal(err) - } - defer tr.Release() - - checkData := func(i int, t string, data []byte) bool { - if len(data) == 0 { - panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fd, i, t)) - } - - checksum0, checksum1 := dataChecksum(data) - if checksum0 != checksum1 { - atomic.StoreUint32(&fail, 1) - atomic.StoreUint32(&done, 1) - corrupted = true - - data0, data1 := dataSplit(data) - data0c0, data0c1 := dataChecksum(data0) - data1c0, data1c1 := dataChecksum(data1) - log.Printf("FATAL: [%v] Corrupted data i=%d t=%s (%#x != %#x): %x(%v) vs %x(%v)", - fd, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1) - return true - } - return false - } - - iter := tr.NewIterator(nil, nil) - defer iter.Release() - for i := 0; iter.Next(); i++ { - ukey, _, kt, kerr := 
parseIkey(iter.Key()) - if kerr != nil { - atomic.StoreUint32(&fail, 1) - atomic.StoreUint32(&done, 1) - corrupted = true - - log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fd, i, kerr) - return - } - if checkData(i, "key", ukey) { - return - } - if kt == ktVal && checkData(i, "value", iter.Value()) { - return - } - } - if err := iter.Error(); err != nil { - if errors.IsCorrupted(err) { - atomic.StoreUint32(&fail, 1) - atomic.StoreUint32(&done, 1) - corrupted = true - - log.Printf("FATAL: [%v] Corruption detected: %v", fd, err) - } else { - log.Fatal(err) - } - } - - return -} - -func (ts *testingStorage) Remove(fd storage.FileDesc) error { - if atomic.LoadUint32(&fail) == 1 { - return nil - } - - if fd.Type == storage.TypeTable { - if ts.scanTable(fd, true) { - return nil - } - } - return ts.Storage.Remove(fd) -} - -type latencyStats struct { - mark time.Time - dur, min, max time.Duration - num int -} - -func (s *latencyStats) start() { - s.mark = time.Now() -} - -func (s *latencyStats) record(n int) { - if s.mark.IsZero() { - panic("not started") - } - dur := time.Now().Sub(s.mark) - dur1 := dur / time.Duration(n) - if dur1 < s.min || s.min == 0 { - s.min = dur1 - } - if dur1 > s.max { - s.max = dur1 - } - s.dur += dur - s.num += n - s.mark = time.Time{} -} - -func (s *latencyStats) ratePerSec() int { - durSec := s.dur / time.Second - if durSec > 0 { - return s.num / int(durSec) - } - return s.num -} - -func (s *latencyStats) avg() time.Duration { - if s.num > 0 { - return s.dur / time.Duration(s.num) - } - return 0 -} - -func (s *latencyStats) add(x *latencyStats) { - if x.min < s.min || s.min == 0 { - s.min = x.min - } - if x.max > s.max { - s.max = x.max - } - s.dur += x.dur - s.num += x.num -} - -func main() { - flag.Parse() - - if enableBufferPool { - bpool = util.NewBufferPool(opt.DefaultBlockSize + 128) - } - - log.Printf("Test DB stored at %q", dbPath) - if httpProf != "" { - log.Printf("HTTP pprof listening at %q", httpProf) - runtime.SetBlockProfileRate(1) - go func() { - if err := http.ListenAndServe(httpProf, nil); err != nil { - log.Fatalf("HTTPPROF: %v", err) - } - }() - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - - os.RemoveAll(dbPath) - stor, err := storage.OpenFile(dbPath, false) - if err != nil { - log.Fatal(err) - } - tstor := &testingStorage{stor} - defer tstor.Close() - - fatalf := func(err error, format string, v ...interface{}) { - atomic.StoreUint32(&fail, 1) - atomic.StoreUint32(&done, 1) - log.Printf("FATAL: "+format, v...) 
- if err != nil && errors.IsCorrupted(err) { - cerr := err.(*errors.ErrCorrupted) - if !cerr.Fd.Zero() && cerr.Fd.Type == storage.TypeTable { - log.Print("FATAL: corruption detected, scanning...") - if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) { - log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd) - } - } - } - runtime.Goexit() - } - - if openFilesCacheCapacity == 0 { - openFilesCacheCapacity = -1 - } - o := &opt.Options{ - OpenFilesCacheCapacity: openFilesCacheCapacity, - DisableBufferPool: !enableBufferPool, - DisableBlockCache: !enableBlockCache, - ErrorIfExist: true, - Compression: opt.NoCompression, - } - if enableCompression { - o.Compression = opt.DefaultCompression - } - - db, err := leveldb.Open(tstor, o) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - var ( - mu = &sync.Mutex{} - gGetStat = &latencyStats{} - gIterStat = &latencyStats{} - gWriteStat = &latencyStats{} - gTrasactionStat = &latencyStats{} - startTime = time.Now() - - writeReq = make(chan *leveldb.Batch) - writeAck = make(chan error) - writeAckAck = make(chan struct{}) - ) - - go func() { - for b := range writeReq { - - var err error - if mrand.Float64() < transactionProb { - log.Print("> Write using transaction") - gTrasactionStat.start() - var tr *leveldb.Transaction - if tr, err = db.OpenTransaction(); err == nil { - if err = tr.Write(b, nil); err == nil { - if err = tr.Commit(); err == nil { - gTrasactionStat.record(b.Len()) - } - } else { - tr.Discard() - } - } - } else { - gWriteStat.start() - if err = db.Write(b, nil); err == nil { - gWriteStat.record(b.Len()) - } - } - writeAck <- err - <-writeAckAck - } - }() - - go func() { - for { - time.Sleep(3 * time.Second) - - log.Print("------------------------") - - log.Printf("> Elapsed=%v", time.Now().Sub(startTime)) - mu.Lock() - log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d", - gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec()) - log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d", - gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec()) - log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d", - gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec()) - log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d", - gTrasactionStat.min, gTrasactionStat.max, gTrasactionStat.avg(), gTrasactionStat.ratePerSec()) - mu.Unlock() - - cachedblock, _ := db.GetProperty("leveldb.cachedblock") - openedtables, _ := db.GetProperty("leveldb.openedtables") - alivesnaps, _ := db.GetProperty("leveldb.alivesnaps") - aliveiters, _ := db.GetProperty("leveldb.aliveiters") - blockpool, _ := db.GetProperty("leveldb.blockpool") - log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q", - cachedblock, openedtables, alivesnaps, aliveiters, blockpool) - - log.Print("------------------------") - } - }() - - for ns, numKey := range numKeys { - func(ns, numKey int) { - log.Printf("[%02d] STARTING: numKey=%d", ns, numKey) - - keys := make([][]byte, numKey) - for i := range keys { - keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen) - } - - wg.Add(1) - go func() { - var wi uint32 - defer func() { - log.Printf("[%02d] WRITER DONE #%d", ns, wi) - wg.Done() - }() - - var ( - b = new(leveldb.Batch) - k2, v2 []byte - nReader int32 - ) - for atomic.LoadUint32(&done) == 0 { 
- log.Printf("[%02d] WRITER #%d", ns, wi) - - b.Reset() - for _, k1 := range keys { - k2 = randomData(k2, byte(ns), 2, wi, keyLen) - v2 = randomData(v2, byte(ns), 3, wi, valueLen) - b.Put(k2, v2) - b.Put(k1, k2) - } - writeReq <- b - if err := <-writeAck; err != nil { - writeAckAck <- struct{}{} - fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err) - } - - snap, err := db.GetSnapshot() - if err != nil { - writeAckAck <- struct{}{} - fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err) - } - - writeAckAck <- struct{}{} - - wg.Add(1) - atomic.AddInt32(&nReader, 1) - go func(snapwi uint32, snap *leveldb.Snapshot) { - var ( - ri int - iterStat = &latencyStats{} - getStat = &latencyStats{} - ) - defer func() { - mu.Lock() - gGetStat.add(getStat) - gIterStat.add(iterStat) - mu.Unlock() - - atomic.AddInt32(&nReader, -1) - log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v", ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg()) - snap.Release() - wg.Done() - }() - - stopi := snapwi + 3 - for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 { - var n int - iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil) - iterStat.start() - for iter.Next() { - k1 := iter.Key() - k2 := iter.Value() - iterStat.record(1) - - if dataNS(k2) != byte(ns) { - fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2)) - } - - kwritei := dataI(k2) - if kwritei != snapwi { - fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei) - } - - getStat.start() - v2, err := snap.Get(k2, nil) - if err != nil { - fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2) - } - getStat.record(1) - - if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 { - err := &errors.ErrCorrupted{Fd: storage.FileDesc{0xff, 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)} - fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2) - } - - n++ - iterStat.start() - } - iter.Release() - if err := iter.Error(); err != nil { - fatalf(err, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err) - } - if n != numKey { - fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n) - } - - ri++ - } - }(wi, snap) - - atomic.AddUint32(&wi, 1) - } - }() - - delB := new(leveldb.Batch) - wg.Add(1) - go func() { - var ( - i int - iterStat = &latencyStats{} - ) - defer func() { - log.Printf("[%02d] SCANNER DONE #%d", ns, i) - wg.Done() - }() - - time.Sleep(2 * time.Second) - - for atomic.LoadUint32(&done) == 0 { - var n int - delB.Reset() - iter := db.NewIterator(dataNsSlice(byte(ns)), nil) - iterStat.start() - for iter.Next() && atomic.LoadUint32(&done) == 0 { - k := iter.Key() - v := iter.Value() - iterStat.record(1) - - for ci, x := range [...][]byte{k, v} { - checksum0, checksum1 := dataChecksum(x) - if checksum0 != checksum1 { - if ci == 0 { - fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v) - } else { - fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v) - } - } - } - - if dataPrefix(k) == 2 || mrand.Int()%999 == 0 { - delB.Delete(k) - } - - n++ - iterStat.start() - } - iter.Release() - if err := iter.Error(); err != nil { - 
fatalf(err, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err) - } - - if n > 0 { - log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg()) - } - - if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 { - t := time.Now() - writeReq <- delB - if err := <-writeAck; err != nil { - writeAckAck <- struct{}{} - fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err) - } else { - writeAckAck <- struct{}{} - } - log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Now().Sub(t)) - } - - i++ - } - }() - }(ns, numKey) - } - - go func() { - sig := make(chan os.Signal) - signal.Notify(sig, os.Interrupt, os.Kill) - log.Printf("Got signal: %v, exiting...", <-sig) - atomic.StoreUint32(&done, 1) - }() - - wg.Wait() -} diff --git a/vendor/github.com/syndtr/goleveldb/manualtest/filelock/main.go b/vendor/github.com/syndtr/goleveldb/manualtest/filelock/main.go deleted file mode 100644 index 192951fa..00000000 --- a/vendor/github.com/syndtr/goleveldb/manualtest/filelock/main.go +++ /dev/null @@ -1,85 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "os" - "os/exec" - "path/filepath" - - "github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - filename string - child bool -) - -func init() { - flag.StringVar(&filename, "filename", filepath.Join(os.TempDir(), "goleveldb_filelock_test"), "Filename used for testing") - flag.BoolVar(&child, "child", false, "This is the child") -} - -func runChild() error { - var args []string - args = append(args, os.Args[1:]...) - args = append(args, "-child") - cmd := exec.Command(os.Args[0], args...) - var out bytes.Buffer - cmd.Stdout = &out - err := cmd.Run() - r := bufio.NewReader(&out) - for { - line, _, e1 := r.ReadLine() - if e1 != nil { - break - } - fmt.Println("[Child]", string(line)) - } - return err -} - -func main() { - flag.Parse() - - fmt.Printf("Using path: %s\n", filename) - if child { - fmt.Println("Child flag set.") - } - - stor, err := storage.OpenFile(filename, false) - if err != nil { - fmt.Printf("Could not open storage: %s", err) - os.Exit(10) - } - - if !child { - fmt.Println("Executing child -- first test (expecting error)") - err := runChild() - if err == nil { - fmt.Println("Expecting error from child") - } else if err.Error() != "exit status 10" { - fmt.Println("Got unexpected error from child:", err) - } else { - fmt.Printf("Got error from child: %s (expected)\n", err) - } - } - - err = stor.Close() - if err != nil { - fmt.Printf("Error when closing storage: %s", err) - os.Exit(11) - } - - if !child { - fmt.Println("Executing child -- second test") - err := runChild() - if err != nil { - fmt.Println("Got unexpected error from child:", err) - } - } - - os.RemoveAll(filename) -} diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
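The vendored gojsonpointer package added below exposes a small JSON Pointer API (NewJsonPointer, Get, Set, Delete; see pointer.go). A minimal, self-contained usage sketch based only on those signatures — the sample document, pointer string, and values are illustrative, not taken from this patch:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/xeipuuv/gojsonpointer"
    )

    func main() {
        // Illustrative JSON document; any object/array structure works.
        var doc map[string]interface{}
        if err := json.Unmarshal([]byte(`{"occupation":{"title":"King","years":15}}`), &doc); err != nil {
            panic(err)
        }

        // Pointer tokens are separated by "/"; "~0" and "~1" escape "~" and "/".
        p, err := gojsonpointer.NewJsonPointer("/occupation/years")
        if err != nil {
            panic(err)
        }

        // Get returns the node, its reflect.Kind, and an error.
        v, kind, err := p.Get(doc)
        fmt.Println(v, kind, err) // prints: 15 float64 <nil>

        // Set updates the value in place and returns the document.
        if _, err := p.Set(doc, 16); err != nil {
            panic(err)
        }
    }
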
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md new file mode 100644 index 00000000..00059242 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md @@ -0,0 +1,41 @@ +# gojsonpointer +An implementation of JSON Pointer - Go language + +## Usage + jsonText := `{ + "name": "Bobby B", + "occupation": { + "title" : "King", + "years" : 15, + "heir" : "Joffrey B" + } + }` + + var jsonDocument map[string]interface{} + json.Unmarshal([]byte(jsonText), &jsonDocument) + + //create a JSON pointer + pointerString := "/occupation/title" + pointer, _ := NewJsonPointer(pointerString) + + //SET a new value for the "title" in the document + pointer.Set(jsonDocument, "Supreme Leader of Westeros") + + //GET the new "title" from the document + title, _, _ := pointer.Get(jsonDocument) + fmt.Println(title) //outputs "Supreme Leader of Westeros" + + //DELETE the "heir" from the document + deletePointer := NewJsonPointer("/occupation/heir") + deletePointer.Delete(jsonDocument) + + b, _ := json.Marshal(jsonDocument) + fmt.Println(string(b)) + //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}` + + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go new file mode 100644 index 00000000..7faf5d7f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go @@ -0,0 +1,211 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. 
+// +// created 25-02-2013 + +package gojsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +const ( + const_empty_pointer = `` + const_pointer_separator = `/` + + const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` +) + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +type JsonPointer struct { + referenceTokens []string +} + +// NewJsonPointer parses the given string JSON pointer and returns an object +func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { + + // Pointer to the root of the document + if len(jsonPointerString) == 0 { + // Keep referenceTokens nil + return + } + if jsonPointerString[0] != '/' { + return p, errors.New(const_invalid_start) + } + + p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) + return +} + +// Uses the pointer to retrieve a value from a JSON document +func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + + is := &implStruct{mode: "GET", inDocument: document} + p.implementation(is) + return is.getOutNode, is.getOutKind, is.outError + +} + +// Uses the pointer to update a value from a JSON document +func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { + + is := &implStruct{mode: "SET", inDocument: document, setInValue: value} + p.implementation(is) + return document, is.outError + +} + +// Uses the pointer to delete a value from a JSON document +func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { + is := &implStruct{mode: "DEL", inDocument: document} + p.implementation(is) + return document, is.outError +} + +// Both Get and Set functions use the same implementation to avoid code duplication +func (p *JsonPointer) implementation(i *implStruct) { + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + i.getOutNode = i.inDocument + i.outError = nil + i.getOutKind = kind + i.outError = nil + return + } + + node := i.inDocument + + previousNodes := make([]interface{}, len(p.referenceTokens)) + previousTokens := make([]string, len(p.referenceTokens)) + + for ti, token := range p.referenceTokens { + + isLastToken := ti == len(p.referenceTokens)-1 + previousNodes[ti] = node + previousTokens[ti] = token + + switch v := node.(type) { + + case map[string]interface{}: + decodedToken := decodeReferenceToken(token) + if _, ok := v[decodedToken]; ok { + node = v[decodedToken] + if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } else if isLastToken && i.mode =="DEL" { + delete(v,decodedToken) + } + } else if (isLastToken && i.mode == "SET") { + v[decodedToken] = i.setInValue + } else { + i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) + i.getOutKind = reflect.Map + i.getOutNode = nil + return + } + + case []interface{}: + tokenIndex, err := strconv.Atoi(token) + if err != nil { + i.outError = fmt.Errorf("Invalid array index '%s'", token) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + if tokenIndex < 0 || tokenIndex >= len(v) { + i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + + node = v[tokenIndex] + if isLastToken && i.mode == "SET" { + v[tokenIndex] = i.setInValue + } else if isLastToken && i.mode =="DEL" { + 
v[tokenIndex] = v[len(v)-1] + v[len(v)-1] = nil + v = v[:len(v)-1] + previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v + } + + default: + i.outError = fmt.Errorf("Invalid token reference '%s'", token) + i.getOutKind = reflect.ValueOf(node).Kind() + i.getOutNode = nil + return + } + + } + + i.getOutNode = node + i.getOutKind = reflect.ValueOf(node).Kind() + i.outError = nil +} + +// Pointer to string representation function +func (p *JsonPointer) String() string { + + if len(p.referenceTokens) == 0 { + return const_empty_pointer + } + + pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +func decodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~1`, `/`, -1) + step2 := strings.Replace(step1, `~0`, `~`, -1) + return step2 +} + +func encodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~`, `~0`, -1) + step2 := strings.Replace(step1, `/`, `~1`, -1) + return step2 +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md new file mode 100644 index 00000000..9ab6e1eb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/README.md @@ -0,0 +1,10 @@ +# gojsonreference +An implementation of JSON Reference - Go language + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go new file mode 100644 index 00000000..64572913 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go @@ -0,0 +1,147 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package gojsonreference + +import ( + "errors" + "net/url" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonpointer" +) + +const ( + const_fragment_char = `#` +) + +func NewJsonReference(jsonReferenceString string) (JsonReference, error) { + + var r JsonReference + err := r.parse(jsonReferenceString) + return r, err + +} + +type JsonReference struct { + referenceUrl *url.URL + referencePointer gojsonpointer.JsonPointer + + HasFullUrl bool + HasUrlPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +func (r *JsonReference) GetUrl() *url.URL { + return r.referenceUrl +} + +func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { + return &r.referencePointer +} + +func (r *JsonReference) String() string { + + if r.referenceUrl != nil { + return r.referenceUrl.String() + } + + if r.HasFragmentOnly { + return const_fragment_char + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +func (r *JsonReference) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) +} + +// "Constructor", parses the given string JSON reference +func (r *JsonReference) parse(jsonReferenceString string) (err error) { + + r.referenceUrl, err = url.Parse(jsonReferenceString) + if err != nil { + return + } + refUrl := r.referenceUrl + + if refUrl.Scheme != "" && refUrl.Host != "" { + r.HasFullUrl = true + } else { + if refUrl.Path != "" { + r.HasUrlPathOnly = true + } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refUrl.Scheme == "file" + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, and if it + // doesn't then its first component will be treated as the host by the + // Go runtime + if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) + } + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path) + } + + // invalid json-pointer error means url has no json-pointer fragment. simply ignore error + r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) + + return +} + +// Creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { + if child.GetUrl() == nil { + return nil, errors.New("childUrl is nil!") + } + + if r.GetUrl() == nil { + return nil, errors.New("parentUrl is nil!") + } + + // Get a copy of the parent url to make sure we do not modify the original. + // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. 
+ // The fragment of the child must be used, so the fragment of the parent is manually removed. + parentUrl := *r.GetUrl() + parentUrl.Fragment = "" + + ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) + if err != nil { + return nil, err + } + return &ref, err +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore new file mode 100644 index 00000000..68e993ce --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore @@ -0,0 +1,3 @@ +*.sw[nop] +*.iml +.vscode/ diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml new file mode 100644 index 00000000..3289001c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - "1.11" + - "1.12" + - "1.13" +before_install: + - go get github.com/xeipuuv/gojsonreference + - go get github.com/xeipuuv/gojsonpointer + - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md new file mode 100644 index 00000000..758f26df --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/README.md @@ -0,0 +1,466 @@ +[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) +[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) +[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) + +# gojsonschema + +## Description + +An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. 
+ +References : + +* http://json-schema.org +* http://json-schema.org/latest/json-schema-core.html +* http://json-schema.org/latest/json-schema-validation.html + +## Installation + +``` +go get github.com/xeipuuv/gojsonschema +``` + +Dependencies : +* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer) +* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference) +* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package) + +## Usage + +### Example + +```go + +package main + +import ( + "fmt" + "github.com/xeipuuv/gojsonschema" +) + +func main() { + + schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") + documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json") + + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + if err != nil { + panic(err.Error()) + } + + if result.Valid() { + fmt.Printf("The document is valid\n") + } else { + fmt.Printf("The document is not valid. see errors :\n") + for _, desc := range result.Errors() { + fmt.Printf("- %s\n", desc) + } + } +} + + +``` + +#### Loaders + +There are various ways to load your JSON data. +In order to load your schemas and documents, +first declare an appropriate loader : + +* Web / HTTP, using a reference : + +```go +loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json") +``` + +* Local file, using a reference : + +```go +loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") +``` + +References use the URI scheme; the prefix (file://) and a full path to the file are required. + +* JSON strings : + +```go +loader := gojsonschema.NewStringLoader(`{"type": "string"}`) +``` + +* Custom Go types : + +```go +m := map[string]interface{}{"type": "string"} +loader := gojsonschema.NewGoLoader(m) +``` + +And + +```go +type Root struct { + Users []User `json:"users"` +} + +type User struct { + Name string `json:"name"` +} + +... + +data := Root{} +data.Users = append(data.Users, User{"John"}) +data.Users = append(data.Users, User{"Sophia"}) +data.Users = append(data.Users, User{"Bill"}) + +loader := gojsonschema.NewGoLoader(data) +``` + +#### Validation + +Once the loaders are set, validation is easy : + +```go +result, err := gojsonschema.Validate(schemaLoader, documentLoader) +``` + +Alternatively, you might want to load a schema only once and perform multiple validations : + +```go +schema, err := gojsonschema.NewSchema(schemaLoader) +... +result1, err := schema.Validate(documentLoader1) +... +result2, err := schema.Validate(documentLoader2) +... +// etc ... +``` + +To check the result : + +```go + if result.Valid() { + fmt.Printf("The document is valid\n") + } else { + fmt.Printf("The document is not valid. see errors :\n") + for _, err := range result.Errors() { + // Err implements the ResultError interface + fmt.Printf("- %s\n", err) + } + } +``` + + +## Loading local schemas + +By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
+ +```go + sl := gojsonschema.NewSchemaLoader() + loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`) + err := sl.AddSchema("http://some_host.com/string.json", loader1) +``` + +Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function +```go + loader2 := gojsonschema.NewStringLoader(`{ + "$id" : "http://some_host.com/maxlength.json", + "maxLength" : 5 + }`) + err = sl.AddSchemas(loader2) +``` + +The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them. +```go + loader3 := gojsonschema.NewStringLoader(`{ + "$id" : "http://some_host.com/main.json", + "allOf" : [ + { "$ref" : "http://some_host.com/string.json" }, + { "$ref" : "http://some_host.com/maxlength.json" } + ] + }`) + + schema, err := sl.Compile(loader3) + + documentLoader := gojsonschema.NewStringLoader(`"hello world"`) + + result, err := schema.Validate(documentLoader) +``` + +It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema. + +```go +err = sl.AddSchemas(loader3) +schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json")) +``` + +Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used. + +## Using a specific draft +By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode. + +Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property. + +```go +sl := gojsonschema.NewSchemaLoader() +sl.Draft = gojsonschema.Draft7 +sl.AutoDetect = false +``` + +If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas. + +## Meta-schema validation +Schemas that are added using the `AddSchema`, `AddSchemas` and `Compile` functions can be validated against their meta-schema by setting the `Validate` property. + +The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step. + +```go +sl := gojsonschema.NewSchemaLoader() +sl.Validate = true +err := sl.AddSchemas(gojsonschema.NewStringLoader(`{ + "$id" : "http://some_host.com/invalid.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "multipleOf" : true +}`)) +``` + +Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema. + +Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used. + + +## Working with Errors + +The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it +```go +gojsonschema.Locale = YourCustomLocale{} +``` + +However, each error contains additional contextual information. + +Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens. + +**err.Type()**: *string* Returns the "type" of error that occurred.
Note that you can also type check; see below. + +Note: An error of type RequiredError has an err.Type() return value of "required" + + "required": RequiredError + "invalid_type": InvalidTypeError + "number_any_of": NumberAnyOfError + "number_one_of": NumberOneOfError + "number_all_of": NumberAllOfError + "number_not": NumberNotError + "missing_dependency": MissingDependencyError + "internal": InternalError + "const": ConstError + "enum": EnumError + "array_no_additional_items": ArrayNoAdditionalItemsError + "array_min_items": ArrayMinItemsError + "array_max_items": ArrayMaxItemsError + "unique": ItemsMustBeUniqueError + "contains" : ArrayContainsError + "array_min_properties": ArrayMinPropertiesError + "array_max_properties": ArrayMaxPropertiesError + "additional_property_not_allowed": AdditionalPropertyNotAllowedError + "invalid_property_pattern": InvalidPropertyPatternError + "invalid_property_name": InvalidPropertyNameError + "string_gte": StringLengthGTEError + "string_lte": StringLengthLTEError + "pattern": DoesNotMatchPatternError + "multiple_of": MultipleOfError + "number_gte": NumberGTEError + "number_gt": NumberGTError + "number_lte": NumberLTEError + "number_lt": NumberLTError + "condition_then" : ConditionThenError + "condition_else" : ConditionElseError + +**err.Value()**: *interface{}* Returns the value given + +**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName + +**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix. + +**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation. + +**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result. + +**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()* + +Note that in most cases, err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format, i.e. +``` +{{.field}} must be greater than or equal to {{.min}} +``` + +The library allows you to specify custom template functions, should you require more complex error message handling. +```go +gojsonschema.ErrorTemplateFuncs = map[string]interface{}{ + "allcaps": func(s string) string { + return strings.ToUpper(s) + }, +} +``` + +Given the above definition, you can use the custom function `"allcaps"` in your localization templates: +``` +{{allcaps .field}} must be greater than or equal to {{.min}} +``` + +The above error message would then be rendered with the `field` value in capital letters. For example: +``` +"PASSWORD must be greater than or equal to 8" +``` + +Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type. + +## Formats +JSON Schema allows for an optional "format" property to validate instances against well-known formats.
gojsonschema ships with all of the formats defined in the spec that you can use like this: + +````json +{"type": "string", "format": "email"} +```` + +Not all formats defined in draft-07 are available. Implemented formats are: + +* `date` +* `time` +* `date-time` +* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames. +* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support. +* `idn-email`. Same caveat as `email`. +* `ipv4` +* `ipv6` +* `uri`. Includes unicode support. +* `uri-reference`. Includes unicode support. +* `iri` +* `iri-reference` +* `uri-template` +* `uuid` +* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible. +* `json-pointer` +* `relative-json-pointer` + +`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific +unicode-enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats. + +The validation code for `uri`, `idn-email` and their relatives uses mostly standard library code. + +For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this: + +```go +// Define the format checker +type RoleFormatChecker struct {} + +// Ensure it meets the gojsonschema.FormatChecker interface +func (f RoleFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + return strings.HasPrefix(asString, "ROLE_") +} + +// Add it to the library +gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{}) +``` + +Now to use it in your JSON schema: +````json +{"type": "string", "format": "role"} +```` + +Another example would be to check if the provided integer matches an id in the database: + +JSON schema: +```json +{"type": "integer", "format": "ValidUserId"} +``` + +```go +// Define the format checker +type ValidUserIdFormatChecker struct {} + +// Ensure it meets the gojsonschema.FormatChecker interface +func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool { + + asFloat64, ok := input.(float64) // Numbers are always float64 here + if ok == false { + return false + } + + // XXX + // do the magic on the database looking for the int(asFloat64) + + return true +} + +// Add it to the library +gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{}) +``` + +Formats can also be removed, for example if you want to override one of the formats that is defined by default. + +```go +gojsonschema.FormatCheckers.Remove("hostname") +``` + + +## Additional custom validation +After the validation has run and you have the results, you may add additional +errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead +of having to add special exceptions for your own errors. Below is an example.
+ +```go +type AnswerInvalidError struct { + gojsonschema.ResultErrorFields +} + +func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError { + err := AnswerInvalidError{} + err.SetContext(context) + err.SetType("custom_invalid_error") + // it is important to use SetDescriptionFormat(), as it is parsed and then used to call SetDescription(); + // a description set directly on err would be overridden by this. + err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}") + err.SetValue(value) + err.SetDetails(details) + + return &err +} + +func main() { + // ... + schema, err := gojsonschema.NewSchema(schemaLoader) + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + + if true { // some validation + jsonContext := gojsonschema.NewJsonContext("question", nil) + errDetail := gojsonschema.ErrorDetails{ + "answer": 42, + } + result.AddError( + newAnswerInvalidError( + gojsonschema.NewJsonContext("answer", jsonContext), + 52, + errDetail, + ), + errDetail, + ) + } + + return result, err + +} +``` + +This is especially useful if you want to add validation beyond what the +json schema drafts can provide, such as business-specific logic. + +## Uses + +gojsonschema uses the following test suite : + +https://github.com/json-schema/JSON-Schema-Test-Suite diff --git a/vendor/github.com/xeipuuv/gojsonschema/draft.go b/vendor/github.com/xeipuuv/gojsonschema/draft.go new file mode 100644 index 00000000..61298e7a --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/draft.go @@ -0,0 +1,125 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package gojsonschema + +import ( + "errors" + "math" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +// Draft is a JSON-schema draft version +type Draft int + +// Supported Draft versions +const ( + Draft4 Draft = 4 + Draft6 Draft = 6 + Draft7 Draft = 7 + Hybrid Draft = math.MaxInt32 +) + +type draftConfig struct { + Version Draft + MetaSchemaURL string + MetaSchema string +} +type draftConfigs []draftConfig + +var drafts draftConfigs + +func init() { + drafts = []draftConfig{ + { + Version: Draft4, + MetaSchemaURL: "http://json-schema.org/draft-04/schema", + MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, + }, + { + Version: Draft6, + MetaSchemaURL: "http://json-schema.org/draft-06/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, + }, + { + Version: Draft7, + MetaSchemaURL: "http://json-schema.org/draft-07/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, + }, + } +} + +func (dc draftConfigs) GetMetaSchema(url string) string { + for _, config := range dc { + if config.MetaSchemaURL == url { + return config.MetaSchema + } + } + return "" +} +func (dc draftConfigs) GetDraftVersion(url string) *Draft { + for _, config := range dc { + if config.MetaSchemaURL == url { + return &config.Version + } + } + return nil +} +func (dc draftConfigs) GetSchemaURL(draft Draft) string { + for _, config := range dc { + if config.Version == draft { + return config.MetaSchemaURL + } + } + return "" +} + +func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { + + if isKind(documentNode, reflect.Bool) { + return "", nil, nil + } + + if !isKind(documentNode, reflect.Map) { + return "", nil, errors.New("schema is invalid") + } + + m := documentNode.(map[string]interface{}) + + if existsMapKey(m, KEY_SCHEMA) { + if !isKind(m[KEY_SCHEMA], 
reflect.String) { + return "", nil, errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": KEY_SCHEMA, + "type": TYPE_STRING, + }, + )) + } + + schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string)) + + if err != nil { + return "", nil, err + } + + schema := schemaReference.String() + + return schema, drafts.GetDraftVersion(schema), nil + } + + return "", nil, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go new file mode 100644 index 00000000..e4e9814f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go @@ -0,0 +1,364 @@ +package gojsonschema + +import ( + "bytes" + "sync" + "text/template" +) + +var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}} + +// template.Template is not thread-safe for writing, so some locking is done +// sync.RWMutex is used for efficiently locking when new templates are created +type errorTemplate struct { + *template.Template + sync.RWMutex +} + +type ( + + // FalseError. ErrorDetails: - + FalseError struct { + ResultErrorFields + } + + // RequiredError indicates that a required field is missing + // ErrorDetails: property string + RequiredError struct { + ResultErrorFields + } + + // InvalidTypeError indicates that a field has the incorrect type + // ErrorDetails: expected, given + InvalidTypeError struct { + ResultErrorFields + } + + // NumberAnyOfError is produced in case of a failing "anyOf" validation + // ErrorDetails: - + NumberAnyOfError struct { + ResultErrorFields + } + + // NumberOneOfError is produced in case of a failing "oneOf" validation + // ErrorDetails: - + NumberOneOfError struct { + ResultErrorFields + } + + // NumberAllOfError is produced in case of a failing "allOf" validation + // ErrorDetails: - + NumberAllOfError struct { + ResultErrorFields + } + + // NumberNotError is produced if a "not" validation failed + // ErrorDetails: - + NumberNotError struct { + ResultErrorFields + } + + // MissingDependencyError is produced in case of a "missing dependency" problem + // ErrorDetails: dependency + MissingDependencyError struct { + ResultErrorFields + } + + // InternalError indicates an internal error + // ErrorDetails: error + InternalError struct { + ResultErrorFields + } + + // ConstError indicates a const error + // ErrorDetails: allowed + ConstError struct { + ResultErrorFields + } + + // EnumError indicates an enum error + // ErrorDetails: allowed + EnumError struct { + ResultErrorFields + } + + // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed + // ErrorDetails: - + ArrayNoAdditionalItemsError struct { + ResultErrorFields + } + + // ArrayMinItemsError is produced if an array contains less items than the allowed minimum + // ErrorDetails: min + ArrayMinItemsError struct { + ResultErrorFields + } + + // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum + // ErrorDetails: max + ArrayMaxItemsError struct { + ResultErrorFields + } + + // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items + // ErrorDetails: type, i, j + ItemsMustBeUniqueError struct { + ResultErrorFields + } + + // ArrayContainsError is produced if an array contains invalid items + // ErrorDetails: + ArrayContainsError struct { + ResultErrorFields + } + + // ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum + // ErrorDetails: min + 
ArrayMinPropertiesError struct { + ResultErrorFields + } + + // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum + // ErrorDetails: max + ArrayMaxPropertiesError struct { + ResultErrorFields + } + + // AdditionalPropertyNotAllowedError is produced if an object has additional properties, but not allowed + // ErrorDetails: property + AdditionalPropertyNotAllowedError struct { + ResultErrorFields + } + + // InvalidPropertyPatternError is produced if an pattern was found + // ErrorDetails: property, pattern + InvalidPropertyPatternError struct { + ResultErrorFields + } + + // InvalidPropertyNameError is produced if an invalid-named property was found + // ErrorDetails: property + InvalidPropertyNameError struct { + ResultErrorFields + } + + // StringLengthGTEError is produced if a string is shorter than the minimum required length + // ErrorDetails: min + StringLengthGTEError struct { + ResultErrorFields + } + + // StringLengthLTEError is produced if a string is longer than the maximum allowed length + // ErrorDetails: max + StringLengthLTEError struct { + ResultErrorFields + } + + // DoesNotMatchPatternError is produced if a string does not match the defined pattern + // ErrorDetails: pattern + DoesNotMatchPatternError struct { + ResultErrorFields + } + + // DoesNotMatchFormatError is produced if a string does not match the defined format + // ErrorDetails: format + DoesNotMatchFormatError struct { + ResultErrorFields + } + + // MultipleOfError is produced if a number is not a multiple of the defined multipleOf + // ErrorDetails: multiple + MultipleOfError struct { + ResultErrorFields + } + + // NumberGTEError is produced if a number is lower than the allowed minimum + // ErrorDetails: min + NumberGTEError struct { + ResultErrorFields + } + + // NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set + // ErrorDetails: min + NumberGTError struct { + ResultErrorFields + } + + // NumberLTEError is produced if a number is higher than the allowed maximum + // ErrorDetails: max + NumberLTEError struct { + ResultErrorFields + } + + // NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set + // ErrorDetails: max + NumberLTError struct { + ResultErrorFields + } + + // ConditionThenError is produced if a condition's "then" validation is invalid + // ErrorDetails: - + ConditionThenError struct { + ResultErrorFields + } + + // ConditionElseError is produced if a condition's "else" condition is invalid + // ErrorDetails: - + ConditionElseError struct { + ResultErrorFields + } +) + +// newError takes a ResultError type and sets the type, context, description, details, value, and field +func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) { + var t string + var d string + switch err.(type) { + case *FalseError: + t = "false" + d = locale.False() + case *RequiredError: + t = "required" + d = locale.Required() + case *InvalidTypeError: + t = "invalid_type" + d = locale.InvalidType() + case *NumberAnyOfError: + t = "number_any_of" + d = locale.NumberAnyOf() + case *NumberOneOfError: + t = "number_one_of" + d = locale.NumberOneOf() + case *NumberAllOfError: + t = "number_all_of" + d = locale.NumberAllOf() + case *NumberNotError: + t = "number_not" + d = locale.NumberNot() + case *MissingDependencyError: + t = "missing_dependency" + d = locale.MissingDependency() + case *InternalError: + 
t = "internal" + d = locale.Internal() + case *ConstError: + t = "const" + d = locale.Const() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayContainsError: + t = "contains" + d = locale.ArrayContains() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *InvalidPropertyNameError: + t = "invalid_property_name" + d = locale.InvalidPropertyName() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *DoesNotMatchPatternError: + t = "pattern" + d = locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + d = locale.NumberLTE() + case *NumberLTError: + t = "number_lt" + d = locale.NumberLT() + case *ConditionThenError: + t = "condition_then" + d = locale.ConditionThen() + case *ConditionElseError: + t = "condition_else" + d = locale.ConditionElse() + } + + err.SetType(t) + err.SetContext(context) + err.SetValue(value) + err.SetDetails(details) + err.SetDescriptionFormat(d) + details["field"] = err.Field() + + if _, exists := details["context"]; !exists && context != nil { + details["context"] = context.String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) +} + +// formatErrorDescription takes a string in the default text/template +// format and converts it to a string with replacements. The fields come +// from the ErrorDetails struct and vary for each type of error. 
+func formatErrorDescription(s string, details ErrorDetails) string { + + var tpl *template.Template + var descrAsBuffer bytes.Buffer + var err error + + errorTemplates.RLock() + tpl = errorTemplates.Lookup(s) + errorTemplates.RUnlock() + + if tpl == nil { + errorTemplates.Lock() + tpl = errorTemplates.New(s) + + if ErrorTemplateFuncs != nil { + tpl.Funcs(ErrorTemplateFuncs) + } + + tpl, err = tpl.Parse(s) + errorTemplates.Unlock() + + if err != nil { + return err.Error() + } + } + + err = tpl.Execute(&descrAsBuffer, details) + if err != nil { + return err.Error() + } + + return descrAsBuffer.String() +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go new file mode 100644 index 00000000..873ffc7d --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go @@ -0,0 +1,368 @@ +package gojsonschema + +import ( + "net" + "net/mail" + "net/url" + "regexp" + "strings" + "sync" + "time" +) + +type ( + // FormatChecker is the interface all formatters added to FormatCheckerChain must implement + FormatChecker interface { + // IsFormat checks if input has the correct format and type + IsFormat(input interface{}) bool + } + + // FormatCheckerChain holds the formatters + FormatCheckerChain struct { + formatters map[string]FormatChecker + } + + // EmailFormatChecker verifies email address formats + EmailFormatChecker struct{} + + // IPV4FormatChecker verifies IP addresses in the IPv4 format + IPV4FormatChecker struct{} + + // IPV6FormatChecker verifies IP addresses in the IPv6 format + IPV6FormatChecker struct{} + + // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6 + // + // Valid formats: + // Partial Time: HH:MM:SS + // Full Date: YYYY-MM-DD + // Full Time: HH:MM:SSZ-07:00 + // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700 + // + // Where + // YYYY = 4DIGIT year + // MM = 2DIGIT month ; 01-12 + // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year + // HH = 2DIGIT hour ; 00-23 + // MM = 2DIGIT ; 00-59 + // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules + // T = Literal + // Z = Literal + // + // Note: Nanoseconds are also suported in all formats + // + // http://tools.ietf.org/html/rfc3339#section-5.6 + DateTimeFormatChecker struct{} + + // DateFormatChecker verifies date formats + // + // Valid format: + // Full Date: YYYY-MM-DD + // + // Where + // YYYY = 4DIGIT year + // MM = 2DIGIT month ; 01-12 + // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year + DateFormatChecker struct{} + + // TimeFormatChecker verifies time formats + // + // Valid formats: + // Partial Time: HH:MM:SS + // Full Time: HH:MM:SSZ-07:00 + // + // Where + // HH = 2DIGIT hour ; 00-23 + // MM = 2DIGIT ; 00-59 + // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules + // T = Literal + // Z = Literal + TimeFormatChecker struct{} + + // URIFormatChecker validates a URI with a valid Scheme per RFC3986 + URIFormatChecker struct{} + + // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986 + URIReferenceFormatChecker struct{} + + // URITemplateFormatChecker validates a URI template per RFC6570 + URITemplateFormatChecker struct{} + + // HostnameFormatChecker validates a hostname is in the correct format + HostnameFormatChecker struct{} + + // UUIDFormatChecker validates a UUID is in the correct format + UUIDFormatChecker struct{} + + // RegexFormatChecker validates a regex is in the correct format + RegexFormatChecker struct{} + + // 
JSONPointerFormatChecker validates a JSON Pointer per RFC6901 + JSONPointerFormatChecker struct{} + + // RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format + RelativeJSONPointerFormatChecker struct{} +) + +var ( + // FormatCheckers holds the valid formatters, and is a public variable + // so library users can add custom formatters + FormatCheckers = FormatCheckerChain{ + formatters: map[string]FormatChecker{ + "date": DateFormatChecker{}, + "time": TimeFormatChecker{}, + "date-time": DateTimeFormatChecker{}, + "hostname": HostnameFormatChecker{}, + "email": EmailFormatChecker{}, + "idn-email": EmailFormatChecker{}, + "ipv4": IPV4FormatChecker{}, + "ipv6": IPV6FormatChecker{}, + "uri": URIFormatChecker{}, + "uri-reference": URIReferenceFormatChecker{}, + "iri": URIFormatChecker{}, + "iri-reference": URIReferenceFormatChecker{}, + "uri-template": URITemplateFormatChecker{}, + "uuid": UUIDFormatChecker{}, + "regex": RegexFormatChecker{}, + "json-pointer": JSONPointerFormatChecker{}, + "relative-json-pointer": RelativeJSONPointerFormatChecker{}, + }, + } + + // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname + rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) + + // Use a regex to make sure curly brackets are balanced properly after validating it as a AURI + rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$") + + rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + + rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$") + + rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$") + + lock = new(sync.RWMutex) +) + +// Add adds a FormatChecker to the FormatCheckerChain +// The name used will be the value used for the format key in your json schema +func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { + lock.Lock() + c.formatters[name] = f + lock.Unlock() + + return c +} + +// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) +func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { + lock.Lock() + delete(c.formatters, name) + lock.Unlock() + + return c +} + +// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name +func (c *FormatCheckerChain) Has(name string) bool { + lock.RLock() + _, ok := c.formatters[name] + lock.RUnlock() + + return ok +} + +// IsFormat will check an input against a FormatChecker with the given name +// to see if it is the correct format +func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { + lock.RLock() + f, ok := c.formatters[name] + lock.RUnlock() + + // If a format is unrecognized it should always pass validation + if !ok { + return true + } + + return f.IsFormat(input) +} + +// IsFormat checks if input is a correctly formatted e-mail address +func (f EmailFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + _, err := mail.ParseAddress(asString) + return err == nil +} + +// IsFormat checks if input is a correctly formatted IPv4-address +func (f IPV4FormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + // Credit: https://github.com/asaskevich/govalidator + ip := net.ParseIP(asString) + return ip != nil && strings.Contains(asString, ".") +} + +// IsFormat checks 
if input is a correctly formatted IPv6=address +func (f IPV6FormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + // Credit: https://github.com/asaskevich/govalidator + ip := net.ParseIP(asString) + return ip != nil && strings.Contains(asString, ":") +} + +// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6 +func (f DateTimeFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + formats := []string{ + "15:04:05", + "15:04:05Z07:00", + "2006-01-02", + time.RFC3339, + time.RFC3339Nano, + } + + for _, format := range formats { + if _, err := time.Parse(format, asString); err == nil { + return true + } + } + + return false +} + +// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD) +func (f DateFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + _, err := time.Parse("2006-01-02", asString) + return err == nil +} + +// IsFormat checks if input correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00) +func (f TimeFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + if _, err := time.Parse("15:04:05Z07:00", asString); err == nil { + return true + } + + _, err := time.Parse("15:04:05", asString) + return err == nil +} + +// IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986 +func (f URIFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + u, err := url.Parse(asString) + + if err != nil || u.Scheme == "" { + return false + } + + return !strings.Contains(asString, `\`) +} + +// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986 +func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + _, err := url.Parse(asString) + return err == nil && !strings.Contains(asString, `\`) +} + +// IsFormat checks if input is a correctly formatted URI template per RFC6570 +func (f URITemplateFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + u, err := url.Parse(asString) + if err != nil || strings.Contains(asString, `\`) { + return false + } + + return rxURITemplate.MatchString(u.Path) +} + +// IsFormat checks if input is a correctly formatted hostname +func (f HostnameFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxHostname.MatchString(asString) && len(asString) < 256 +} + +// IsFormat checks if input is a correctly formatted UUID +func (f UUIDFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxUUID.MatchString(asString) +} + +// IsFormat checks if input is a correctly formatted regular expression +func (f RegexFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + if asString == "" { + return true + } + _, err := regexp.Compile(asString) + return err == nil +} + +// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901 +func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxJSONPointer.MatchString(asString) +} + +// IsFormat checks if 
input is a correctly formatted relative JSON Pointer +func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxRelJSONPointer.MatchString(asString) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml new file mode 100644 index 00000000..ab6fb867 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml @@ -0,0 +1,13 @@ +package: github.com/xeipuuv/gojsonschema +license: Apache 2.0 +import: +- package: github.com/xeipuuv/gojsonschema + +- package: github.com/xeipuuv/gojsonpointer + +- package: github.com/xeipuuv/gojsonreference + +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/go.mod b/vendor/github.com/xeipuuv/gojsonschema/go.mod new file mode 100644 index 00000000..b709d7fc --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/go.mod @@ -0,0 +1,7 @@ +module github.com/xeipuuv/gojsonschema + +require ( + github.com/stretchr/testify v1.3.0 + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 +) diff --git a/vendor/github.com/xeipuuv/gojsonschema/go.sum b/vendor/github.com/xeipuuv/gojsonschema/go.sum new file mode 100644 index 00000000..0e865ac7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go new file mode 100644 index 00000000..4ef7a8d0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go new file mode 100644 index 00000000..0e979707 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go @@ -0,0 +1,73 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// JsonContext implements a persistent linked-list of strings +type JsonContext struct { + head string + tail *JsonContext +} + +// NewJsonContext creates a new JsonContext +func NewJsonContext(head string, tail *JsonContext) *JsonContext { + return &JsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *JsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *JsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go new file mode 100644 index 00000000..5d88af26 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -0,0 +1,386 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. +// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSONLoader defines the JSON loader interface +type JSONLoader interface { + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +// JSONLoaderFactory defines the JSON loader factory interface +type JSONLoaderFactory interface { + // New creates a new JSON loader for the given source + New(source string) JSONLoader +} + +// DefaultJSONLoaderFactory is the default JSON loader factory +type DefaultJSONLoaderFactory struct { +} + +// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +// New creates a new JSON loader for the given source +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// New creates a new JSON loader for the given source +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +// Opens a file with the given name +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &FileSystemJSONLoaderFactory{ + fs: l.fs, + } +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) + if err != nil { + return nil, err + } + + refToURL := reference + refToURL.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.TrimPrefix(refToURL.String(), "file://") + filename, err = url.QueryUnescape(filename) + + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + filename = strings.TrimPrefix(filename, "/") + filename = filepath.FromSlash(filename) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToURL.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + // returned cached versions for metaschemas for drafts 4, 6 and 7 + // for performance and allow for easier offline use + if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { + return decodeJSONUsingNumber(strings.NewReader(metaSchema)) + } + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + bodyBuff, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewStringLoader creates a new JSONLoader, taking a string as source +func NewStringLoader(source string) JSONLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { + + return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) + +} + +// JSON bytes loader + +type jsonBytesLoader struct { + source []byte +} + +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source +func NewBytesLoader(source []byte) JSONLoader { + return &jsonBytesLoader{source: source} +} + +func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { + return 
decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... + +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewGoLoader creates a new JSONLoader from a given Go struct +func NewGoLoader(source interface{}) JSONLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.JsonSource()) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) + +} + +type jsonIOLoader struct { + buf *bytes.Buffer +} + +// NewReaderLoader creates a new JSON loader using the provided io.Reader +func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) +} + +// NewWriterLoader creates a new JSON loader using the provided io.Writer +func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) +} + +func (l *jsonIOLoader) JsonSource() interface{} { + return l.buf.String() +} + +func (l *jsonIOLoader) LoadJSON() (interface{}, error) { + return decodeJSONUsingNumber(l.buf) +} + +func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// JSON raw loader +// In case the JSON is already marshalled to interface{} use this loader +// This is used for testing as otherwise there is no guarantee the JSON is marshalled +// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber +type jsonRawLoader struct { + source interface{} +} + +// NewRawLoader creates a new JSON raw loader for the given source +func NewRawLoader(source interface{}) JSONLoader { + return &jsonRawLoader{source: source} +} +func (l *jsonRawLoader) JsonSource() interface{} { + return l.source +} +func (l *jsonRawLoader) LoadJSON() (interface{}, error) { + return l.source, nil +} +func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} +func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go new file mode 100644 index 00000000..a416225c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -0,0 +1,472 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. +// +// created 01-01-2015 + +package gojsonschema + +type ( + // locale is an interface for defining custom error strings + locale interface { + + // False returns a format-string for "false" schema validation errors + False() string + + // Required returns a format-string for "required" schema validation errors + Required() string + + // InvalidType returns a format-string for "invalid type" schema validation errors + InvalidType() string + + // NumberAnyOf returns a format-string for "anyOf" schema validation errors + NumberAnyOf() string + + // NumberOneOf returns a format-string for "oneOf" schema validation errors + NumberOneOf() string + + // NumberAllOf returns a format-string for "allOf" schema validation errors + NumberAllOf() string + + // NumberNot returns a format-string to format a NumberNotError + NumberNot() string + + // MissingDependency returns a format-string for "missing dependency" schema validation errors + MissingDependency() string + + // Internal returns a format-string for internal errors + Internal() string + + // Const returns a format-string to format a ConstError + Const() string + + // Enum returns a format-string to format an EnumError + Enum() string + + // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema + ArrayNotEnoughItems() string + + // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError + ArrayNoAdditionalItems() string + + // ArrayMinItems returns a format-string to format an ArrayMinItemsError + ArrayMinItems() string + + // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError + ArrayMaxItems() string + + // Unique returns a format-string to format an ItemsMustBeUniqueError + Unique() string + + // ArrayContains returns a format-string to format an ArrayContainsError + ArrayContains() string + + // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError + ArrayMinProperties() string + + // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError + ArrayMaxProperties() string + + // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError + AdditionalPropertyNotAllowed() string + + // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError + InvalidPropertyPattern() string + + // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError + InvalidPropertyName() string + + // StringGTE returns a format-string to format an StringLengthGTEError + StringGTE() string + + // StringLTE returns a format-string to format an StringLengthLTEError + StringLTE() string + + // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError + DoesNotMatchPattern() string 
+ + // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError + DoesNotMatchFormat() string + + // MultipleOf returns a format-string to format an MultipleOfError + MultipleOf() string + + // NumberGTE returns a format-string to format an NumberGTEError + NumberGTE() string + + // NumberGT returns a format-string to format an NumberGTError + NumberGT() string + + // NumberLTE returns a format-string to format an NumberLTEError + NumberLTE() string + + // NumberLT returns a format-string to format an NumberLTError + NumberLT() string + + // Schema validations + + // RegexPattern returns a format-string to format a regex-pattern error + RegexPattern() string + + // GreaterThanZero returns a format-string to format an error where a number must be greater than zero + GreaterThanZero() string + + // MustBeOfA returns a format-string to format an error where a value is of the wrong type + MustBeOfA() string + + // MustBeOfAn returns a format-string to format an error where a value is of the wrong type + MustBeOfAn() string + + // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error + CannotBeUsedWithout() string + + // CannotBeGT returns a format-string to format an error where a value are greater than allowed + CannotBeGT() string + + // MustBeOfType returns a format-string to format an error where a value does not match the required type + MustBeOfType() string + + // MustBeValidRegex returns a format-string to format an error where a regex is invalid + MustBeValidRegex() string + + // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format + MustBeValidFormat() string + + // MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 + MustBeGTEZero() string + + // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed + KeyCannotBeGreaterThan() string + + // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type + KeyItemsMustBeOfType() string + + // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique + KeyItemsMustBeUnique() string + + // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error + ReferenceMustBeCanonical() string + + // NotAValidType returns a format-string to format an invalid type error + NotAValidType() string + + // Duplicated returns a format-string to format an error where types are duplicated + Duplicated() string + + // HttpBadStatus returns a format-string for errors when loading a schema using HTTP + HttpBadStatus() string + + // ParseError returns a format-string for JSON parsing errors + ParseError() string + + // ConditionThen returns a format-string for ConditionThenError errors + ConditionThen() string + + // ConditionElse returns a format-string for ConditionElseError errors + ConditionElse() string + + // ErrorFormat returns a format string for errors + ErrorFormat() string + } + + // DefaultLocale is the default locale for this package + DefaultLocale struct{} +) + +// False returns a format-string for "false" schema validation errors +func (l DefaultLocale) False() string { + return "False always fails validation" +} + +// Required returns a format-string for "required" schema validation errors +func (l DefaultLocale) Required() string { + return `{{.property}} is required` +} + +// InvalidType returns a format-string for 
"invalid type" schema validation errors +func (l DefaultLocale) InvalidType() string { + return `Invalid type. Expected: {{.expected}}, given: {{.given}}` +} + +// NumberAnyOf returns a format-string for "anyOf" schema validation errors +func (l DefaultLocale) NumberAnyOf() string { + return `Must validate at least one schema (anyOf)` +} + +// NumberOneOf returns a format-string for "oneOf" schema validation errors +func (l DefaultLocale) NumberOneOf() string { + return `Must validate one and only one schema (oneOf)` +} + +// NumberAllOf returns a format-string for "allOf" schema validation errors +func (l DefaultLocale) NumberAllOf() string { + return `Must validate all the schemas (allOf)` +} + +// NumberNot returns a format-string to format a NumberNotError +func (l DefaultLocale) NumberNot() string { + return `Must not validate the schema (not)` +} + +// MissingDependency returns a format-string for "missing dependency" schema validation errors +func (l DefaultLocale) MissingDependency() string { + return `Has a dependency on {{.dependency}}` +} + +// Internal returns a format-string for internal errors +func (l DefaultLocale) Internal() string { + return `Internal Error {{.error}}` +} + +// Const returns a format-string to format a ConstError +func (l DefaultLocale) Const() string { + return `{{.field}} does not match: {{.allowed}}` +} + +// Enum returns a format-string to format an EnumError +func (l DefaultLocale) Enum() string { + return `{{.field}} must be one of the following: {{.allowed}}` +} + +// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError +func (l DefaultLocale) ArrayNoAdditionalItems() string { + return `No additional items allowed on array` +} + +// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema +func (l DefaultLocale) ArrayNotEnoughItems() string { + return `Not enough items on array to match positional list of schema` +} + +// ArrayMinItems returns a format-string to format an ArrayMinItemsError +func (l DefaultLocale) ArrayMinItems() string { + return `Array must have at least {{.min}} items` +} + +// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError +func (l DefaultLocale) ArrayMaxItems() string { + return `Array must have at most {{.max}} items` +} + +// Unique returns a format-string to format an ItemsMustBeUniqueError +func (l DefaultLocale) Unique() string { + return `{{.type}} items[{{.i}},{{.j}}] must be unique` +} + +// ArrayContains returns a format-string to format an ArrayContainsError +func (l DefaultLocale) ArrayContains() string { + return `At least one of the items must match` +} + +// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError +func (l DefaultLocale) ArrayMinProperties() string { + return `Must have at least {{.min}} properties` +} + +// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError +func (l DefaultLocale) ArrayMaxProperties() string { + return `Must have at most {{.max}} properties` +} + +// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError +func (l DefaultLocale) AdditionalPropertyNotAllowed() string { + return `Additional property {{.property}} is not allowed` +} + +// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError +func (l DefaultLocale) InvalidPropertyPattern() string { + return `Property "{{.property}}" does not match pattern {{.pattern}}` +} 
+ +// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError +func (l DefaultLocale) InvalidPropertyName() string { + return `Property name of "{{.property}}" does not match` +} + +// StringGTE returns a format-string to format an StringLengthGTEError +func (l DefaultLocale) StringGTE() string { + return `String length must be greater than or equal to {{.min}}` +} + +// StringLTE returns a format-string to format an StringLengthLTEError +func (l DefaultLocale) StringLTE() string { + return `String length must be less than or equal to {{.max}}` +} + +// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError +func (l DefaultLocale) DoesNotMatchPattern() string { + return `Does not match pattern '{{.pattern}}'` +} + +// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError +func (l DefaultLocale) DoesNotMatchFormat() string { + return `Does not match format '{{.format}}'` +} + +// MultipleOf returns a format-string to format an MultipleOfError +func (l DefaultLocale) MultipleOf() string { + return `Must be a multiple of {{.multiple}}` +} + +// NumberGTE returns the format string to format a NumberGTEError +func (l DefaultLocale) NumberGTE() string { + return `Must be greater than or equal to {{.min}}` +} + +// NumberGT returns the format string to format a NumberGTError +func (l DefaultLocale) NumberGT() string { + return `Must be greater than {{.min}}` +} + +// NumberLTE returns the format string to format a NumberLTEError +func (l DefaultLocale) NumberLTE() string { + return `Must be less than or equal to {{.max}}` +} + +// NumberLT returns the format string to format a NumberLTError +func (l DefaultLocale) NumberLT() string { + return `Must be less than {{.max}}` +} + +// Schema validators + +// RegexPattern returns a format-string to format a regex-pattern error +func (l DefaultLocale) RegexPattern() string { + return `Invalid regex pattern '{{.pattern}}'` +} + +// GreaterThanZero returns a format-string to format an error where a number must be greater than zero +func (l DefaultLocale) GreaterThanZero() string { + return `{{.number}} must be strictly greater than 0` +} + +// MustBeOfA returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfA() string { + return `{{.x}} must be of a {{.y}}` +} + +// MustBeOfAn returns a format-string to format an error where a value is of the wrong type +func (l DefaultLocale) MustBeOfAn() string { + return `{{.x}} must be of an {{.y}}` +} + +// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error +func (l DefaultLocale) CannotBeUsedWithout() string { + return `{{.x}} cannot be used without {{.y}}` +} + +// CannotBeGT returns a format-string to format an error where a value are greater than allowed +func (l DefaultLocale) CannotBeGT() string { + return `{{.x}} cannot be greater than {{.y}}` +} + +// MustBeOfType returns a format-string to format an error where a value does not match the required type +func (l DefaultLocale) MustBeOfType() string { + return `{{.key}} must be of type {{.type}}` +} + +// MustBeValidRegex returns a format-string to format an error where a regex is invalid +func (l DefaultLocale) MustBeValidRegex() string { + return `{{.key}} must be a valid regex` +} + +// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format +func (l DefaultLocale) MustBeValidFormat() string { + return `{{.key}} must be a valid format 
{{.given}}` +} + +// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 +func (l DefaultLocale) MustBeGTEZero() string { + return `{{.key}} must be greater than or equal to 0` +} + +// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed +func (l DefaultLocale) KeyCannotBeGreaterThan() string { + return `{{.key}} cannot be greater than {{.y}}` +} + +// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type +func (l DefaultLocale) KeyItemsMustBeOfType() string { + return `{{.key}} items must be {{.type}}` +} + +// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique +func (l DefaultLocale) KeyItemsMustBeUnique() string { + return `{{.key}} items must be unique` +} + +// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error +func (l DefaultLocale) ReferenceMustBeCanonical() string { + return `Reference {{.reference}} must be canonical` +} + +// NotAValidType returns a format-string to format an invalid type error +func (l DefaultLocale) NotAValidType() string { + return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` +} + +// Duplicated returns a format-string to format an error where types are duplicated +func (l DefaultLocale) Duplicated() string { + return `{{.type}} type is duplicated` +} + +// HttpBadStatus returns a format-string for errors when loading a schema using HTTP +func (l DefaultLocale) HttpBadStatus() string { + return `Could not read schema from HTTP, response status is {{.status}}` +} + +// ErrorFormat returns a format string for errors +// Replacement options: field, description, context, value +func (l DefaultLocale) ErrorFormat() string { + return `{{.field}}: {{.description}}` +} + +// ParseError returns a format-string for JSON parsing errors +func (l DefaultLocale) ParseError() string { + return `Expected: {{.expected}}, given: Invalid JSON` +} + +// ConditionThen returns a format-string for ConditionThenError errors +// If/Else +func (l DefaultLocale) ConditionThen() string { + return `Must validate "then" as "if" was valid` +} + +// ConditionElse returns a format-string for ConditionElseError errors +func (l DefaultLocale) ConditionElse() string { + return `Must validate "else" as "if" was not valid` +} + +// constants +const ( + STRING_NUMBER = "number" + STRING_ARRAY_OF_STRINGS = "array of strings" + STRING_ARRAY_OF_SCHEMAS = "array of schemas" + STRING_SCHEMA = "valid schema" + STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" + STRING_PROPERTIES = "properties" + STRING_DEPENDENCY = "dependency" + STRING_PROPERTY = "property" + STRING_UNDEFINED = "undefined" + STRING_CONTEXT_ROOT = "(root)" + STRING_ROOT_SCHEMA_PROPERTY = "(root)" +) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go new file mode 100644 index 00000000..0a017914 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -0,0 +1,220 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + // Field returns the field name without the root context + // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName + Field() string + // SetType sets the error-type + SetType(string) + // Type returns the error-type + Type() string + // SetContext sets the JSON-context for the error + SetContext(*JsonContext) + // Context returns the JSON-context of the error + Context() *JsonContext + // SetDescription sets a description for the error + SetDescription(string) + // Description returns the description of the error + Description() string + // SetDescriptionFormat sets the format for the description in the default text/template format + SetDescriptionFormat(string) + // DescriptionFormat returns the format for the description in the default text/template format + DescriptionFormat() string + // SetValue sets the value related to the error + SetValue(interface{}) + // Value returns the value related to the error + Value() interface{} + // SetDetails sets the details specific to the error + SetDetails(ErrorDetails) + // Details returns details about the error + Details() ErrorDetails + // String returns a string representation of the error + String() string + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... + description string // A human readable error message + descriptionFormat string // A format for human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + // Result holds the result of a validation + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field returns the field name without the root context +// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName +func (v *ResultErrorFields) Field() string { + return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") +} + +// SetType sets the error-type +func (v *ResultErrorFields) SetType(errorType string) { + v.errorType = errorType +} + +// Type returns the error-type +func (v *ResultErrorFields) Type() string { + return v.errorType +} + +// SetContext sets the JSON-context for the error +func (v *ResultErrorFields) SetContext(context *JsonContext) { + v.context = context +} + +// Context returns the JSON-context of the error +func (v *ResultErrorFields) Context() *JsonContext { + return v.context +} + +// SetDescription sets a description for the error +func (v *ResultErrorFields) SetDescription(description string) { + v.description = description +} + +// Description returns the description of the error +func (v *ResultErrorFields) Description() string { + return v.description +} + +// SetDescriptionFormat sets the format for the description in the default text/template format +func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) { + v.descriptionFormat = descriptionFormat +} + +// DescriptionFormat returns the format for the description in the default text/template format +func (v *ResultErrorFields) DescriptionFormat() string { + return v.descriptionFormat +} + +// SetValue sets the value related to the error +func (v *ResultErrorFields) SetValue(value interface{}) { + v.value = value +} + +// Value returns the value related to the error +func (v *ResultErrorFields) Value() interface{} { + return v.value +} + +// SetDetails sets the details specific to the error +func (v *ResultErrorFields) SetDetails(details ErrorDetails) { + v.details = details +} + +// Details returns details about the error +func (v *ResultErrorFields) Details() ErrorDetails { + return v.details +} + +// String returns a string representation of the error +func (v ResultErrorFields) String() string { + // as a fallback, the value is displayed go style + valueString := fmt.Sprintf("%v", v.value) + + // marshal the go value value to json + if v.value == nil { + valueString = TYPE_NULL + } else { + if vs, err := marshalToJSONString(v.value); err == nil { + if vs == nil { + valueString = TYPE_NULL + } else { + valueString = *vs + } + } + } + + return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ + "context": v.context.String(), + "description": v.description, + "value": valueString, + "field": v.Field(), + }) +} + +// Valid indicates if no errors were found +func (v *Result) Valid() bool { + return len(v.errors) == 0 +} + +// Errors returns the errors that were found +func (v *Result) Errors() []ResultError { + return v.errors +} + +// AddError appends a fully filled error to the error set +// SetDescription() will be called with the result of the parsed err.DescriptionFormat() +func (v *Result) AddError(err ResultError, details ErrorDetails) { + if _, exists := details["context"]; !exists && err.Context() != nil { + details["context"] = err.Context().String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) + + v.errors = append(v.errors, err) +} + +func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) { + newError(err, context, value, Locale, details) + v.errors = append(v.errors, err) + v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function +} 
+ +// Used to copy errors from a sub-schema to the main one +func (v *Result) mergeErrors(otherResult *Result) { + v.errors = append(v.errors, otherResult.Errors()...) + v.score += otherResult.score +} + +func (v *Result) incrementScore() { + v.score++ +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go new file mode 100644 index 00000000..9e93cd79 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -0,0 +1,1087 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "math/big" + "reflect" + "regexp" + "text/template" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} + + // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. + ErrorTemplateFuncs template.FuncMap +) + +// NewSchema instances a schema using the given JSONLoader +func NewSchema(l JSONLoader) (*Schema, error) { + return NewSchemaLoader().Compile(l) +} + +// Schema holds a schema +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}, draft Draft) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} + return d.parseSchema(document, d.rootSchema) +} + +// SetRootSchemaName sets the root-schema name +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring +// Not much magic involved here, most of the job is to validate the key names and their values, +// then the values are copied into subSchema struct +// +func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { + + if currentSchema.draft == nil { + if currentSchema.parent == nil { + return errors.New("Draft not set") + } + currentSchema.draft = currentSchema.parent.draft + } + + // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" + if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { + b := documentNode.(bool) + currentSchema.pass = &b + return nil + } + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.ParseError(), + ErrorDetails{ + "expected": STRING_SCHEMA, + }, + )) + } + + m := documentNode.(map[string]interface{}) + + if currentSchema.parent == nil { + currentSchema.ref = &d.documentReference + currentSchema.id = &d.documentReference + } + + if currentSchema.id == nil && currentSchema.parent != nil { + currentSchema.id = currentSchema.parent.id + } + + // In draft 6 the id keyword was renamed to $id + // Hybrid mode uses the old id by default + var keyID string + + switch *currentSchema.draft { + case Draft4: + keyID = KEY_ID + case Hybrid: + keyID = KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + default: + keyID = KEY_ID_NEW + } + if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": keyID, + }, + )) + } + if k, ok := m[keyID].(string); ok { + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + if currentSchema == d.rootSchema { + currentSchema.id = &jsonReference + } else { + ref, err := currentSchema.parent.id.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.id = ref + } + } + + // definitions + if existsMapKey(m, KEY_DEFINITIONS) { + if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { + for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { + if isKind(dv, reflect.Map, reflect.Bool) { + + newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} + + err := d.parseSchema(dv, newSchema) + + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + + } + + // title + if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_TITLE, + }, + )) + } + if k, ok := m[KEY_TITLE].(string); ok { + currentSchema.title = &k + } + + // description + if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_DESCRIPTION, + }, + )) + } + if k, ok := m[KEY_DESCRIPTION].(string); ok { + currentSchema.description = &k + } + + // $ref + if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { + return errors.New(formatErrorDescription( + 
Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_REF, + }, + )) + } + + if k, ok := m[KEY_REF].(string); ok { + + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + + currentSchema.ref = &jsonReference + + if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { + currentSchema.refSchema = sch + } else { + err := d.parseReference(documentNode, currentSchema) + + if err != nil { + return err + } + + return nil + } + } + + // type + if existsMapKey(m, KEY_TYPE) { + if isKind(m[KEY_TYPE], reflect.String) { + if k, ok := m[KEY_TYPE].(string); ok { + err := currentSchema.types.Add(k) + if err != nil { + return err + } + } + } else { + if isKind(m[KEY_TYPE], reflect.Slice) { + arrayOfTypes := m[KEY_TYPE].([]interface{}) + for _, typeInArray := range arrayOfTypes { + if reflect.ValueOf(typeInArray).Kind() != reflect.String { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + if err := currentSchema.types.Add(typeInArray.(string)); err != nil { + return err + } + } + + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + } + } + + // properties + if existsMapKey(m, KEY_PROPERTIES) { + err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) + if err != nil { + return err + } + } + + // additionalProperties + if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { + if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { + currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) + } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalProperties = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_PROPERTIES, + }, + )) + } + } + + // patternProperties + if existsMapKey(m, KEY_PATTERN_PROPERTIES) { + if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { + patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) + if len(patternPropertiesMap) > 0 { + currentSchema.patternProperties = make(map[string]*subSchema) + for k, v := range patternPropertiesMap { + _, err := regexp.MatchString(k, "") + if err != nil { + return errors.New(formatErrorDescription( + Locale.RegexPattern(), + ErrorDetails{"pattern": k}, + )) + } + newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err = d.parseSchema(v, newSchema) + if err != nil { + return errors.New(err.Error()) + } + currentSchema.patternProperties[k] = newSchema + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // propertyNames + if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { + if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertyNames = newSchema + err := 
d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // dependencies + if existsMapKey(m, KEY_DEPENDENCIES) { + err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) + if err != nil { + return err + } + } + + // items + if existsMapKey(m, KEY_ITEMS) { + if isKind(m[KEY_ITEMS], reflect.Slice) { + for _, itemElement := range m[KEY_ITEMS].([]interface{}) { + if isKind(itemElement, reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(itemElement, newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + currentSchema.itemsChildrenIsSingleSchema = false + } + } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) + err := d.parseSchema(m[KEY_ITEMS], newSchema) + if err != nil { + return err + } + currentSchema.itemsChildrenIsSingleSchema = true + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + } + + // additionalItems + if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { + if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { + currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) + } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalItems = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_ITEMS, + }, + )) + } + } + + // validation : number / integer + + if existsMapKey(m, KEY_MULTIPLE_OF) { + multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) + if multipleOfValue == nil { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_NUMBER, + "given": KEY_MULTIPLE_OF, + }, + )) + } + if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 { + return errors.New(formatErrorDescription( + Locale.GreaterThanZero(), + ErrorDetails{"number": KEY_MULTIPLE_OF}, + )) + } + currentSchema.multipleOf = multipleOfValue + } + + if existsMapKey(m, KEY_MINIMUM) { + minimumValue := mustBeNumber(m[KEY_MINIMUM]) + if minimumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.minimum = minimumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + 
)) + } + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + } + } + + if existsMapKey(m, KEY_MAXIMUM) { + maximumValue := mustBeNumber(m[KEY_MAXIMUM]) + if maximumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.maximum = maximumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if 
*minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } + + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if !ok { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, + )) + } + currentSchema.format = formatString + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + if isStringInSlice(currentSchema.required, requiredValue.(string)) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + currentSchema.required = append(currentSchema.required, requiredValue.(string)) + } else 
{ + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { + newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.contains = newSchema + err := d.parseSchema(m[KEY_CONTAINS], newSchema) + if err != nil { + return err + } + } + + // validation : all + + if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { + is, err := marshalWithoutNumber(m[KEY_CONST]) + if err != nil { + return err + } + currentSchema._const = is + } + + if existsMapKey(m, KEY_ENUM) { + if isKind(m[KEY_ENUM], reflect.Slice) { + for _, v := range m[KEY_ENUM].([]interface{}) { + is, err := marshalWithoutNumber(v) + if err != nil { + return err + } + if isStringInSlice(currentSchema.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + currentSchema.enum = append(currentSchema.enum, *is) + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, + )) + } + } + + // validation : subSchema + + if existsMapKey(m, KEY_ONE_OF) { + if isKind(m[KEY_ONE_OF], reflect.Slice) { + for _, v := range m[KEY_ONE_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.oneOf = append(currentSchema.oneOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ANY_OF) { + if isKind(m[KEY_ANY_OF], reflect.Slice) { + for _, v := range m[KEY_ANY_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.anyOf = append(currentSchema.anyOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return 
errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ALL_OF) { + if isKind(m[KEY_ALL_OF], reflect.Slice) { + for _, v := range m[KEY_ALL_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.allOf = append(currentSchema.allOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_NOT) { + if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} + currentSchema.not = newSchema + err := d.parseSchema(m[KEY_NOT], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, + )) + } + } + + if *currentSchema.draft >= Draft7 { + if existsMapKey(m, KEY_IF) { + if isKind(m[KEY_IF], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref} + currentSchema._if = newSchema + err := d.parseSchema(m[KEY_IF], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_THEN) { + if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref} + currentSchema._then = newSchema + err := d.parseSchema(m[KEY_THEN], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT}, + )) + } + } + + if existsMapKey(m, KEY_ELSE) { + if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref} + currentSchema._else = newSchema + err := d.parseSchema(m[KEY_ELSE], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT}, + )) + } + } + } + + return nil +} + +func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { + var ( + refdDocumentNode interface{} + dsp *schemaPoolDocument + err error + ) + + newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} + + d.referencePool.Add(currentSchema.ref.String(), newSchema) + + dsp, err = d.pool.GetDocument(*currentSchema.ref) + if err != nil { + return err + } + newSchema.id = currentSchema.ref + + refdDocumentNode = dsp.Document + newSchema.draft = dsp.Draft + + if err != nil { + return err + } + + if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, + )) + } + + err = d.parseSchema(refdDocumentNode, newSchema) + if err != nil { + return err + } + + currentSchema.refSchema = newSchema + + return nil + +} + +func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + 
ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + for k := range m { + schemaProperty := k + newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) + err := d.parseSchema(m[k], newSchema) + if err != nil { + return err + } + } + + return nil +} + +func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + currentSchema.dependencies = make(map[string]interface{}) + + for k := range m { + switch reflect.ValueOf(m[k]).Kind() { + + case reflect.Slice: + values := m[k].([]interface{}) + var valuesToRegister []string + + for _, value := range values { + if !isKind(value, reflect.String) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + valuesToRegister = append(valuesToRegister, value.(string)) + currentSchema.dependencies[k] = valuesToRegister + } + + case reflect.Map, reflect.Bool: + depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err := d.parseSchema(m[k], depSchema) + if err != nil { + return err + } + currentSchema.dependencies[k] = depSchema + + default: + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + + } + + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go new file mode 100644 index 00000000..20db0c1f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go @@ -0,0 +1,206 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gojsonschema + +import ( + "bytes" + "errors" + + "github.com/xeipuuv/gojsonreference" +) + +// SchemaLoader is used to load schemas +type SchemaLoader struct { + pool *schemaPool + AutoDetect bool + Validate bool + Draft Draft +} + +// NewSchemaLoader creates a new NewSchemaLoader +func NewSchemaLoader() *SchemaLoader { + + ps := &SchemaLoader{ + pool: &schemaPool{ + schemaPoolDocuments: make(map[string]*schemaPoolDocument), + }, + AutoDetect: true, + Validate: false, + Draft: Hybrid, + } + ps.pool.autoDetect = &ps.AutoDetect + + return ps +} + +func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { + + var ( + schema string + err error + ) + if sl.AutoDetect { + schema, _, err = parseSchemaURL(documentNode) + if err != nil { + return err + } + } + + // If no explicit "$schema" is used, use the default metaschema associated with the draft used + if schema == "" { + if sl.Draft == Hybrid { + return nil + } + schema = drafts.GetSchemaURL(sl.Draft) + } + + //Disable validation when loading the metaschema to prevent an infinite recursive loop + sl.Validate = false + + metaSchema, err := sl.Compile(NewReferenceLoader(schema)) + + if err != nil { + return err + } + + sl.Validate = true + + result := metaSchema.validateDocument(documentNode) + + if !result.Valid() { + var res bytes.Buffer + for _, err := range result.Errors() { + res.WriteString(err.String()) + res.WriteString("\n") + } + return errors.New(res.String()) + } + + return nil +} + +// AddSchemas adds an arbritrary amount of schemas to the schema cache. As this function does not require +// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema +func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error { + emptyRef, _ := gojsonreference.NewJsonReference("") + + for _, loader := range loaders { + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + // Directly use the Recursive function, so that it get only added to the schema pool by $id + // and not by the ref of the document as it's empty + if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil { + return err + } + } + + return nil +} + +//AddSchema adds a schema under the provided URL to the schema cache +func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error { + + ref, err := gojsonreference.NewJsonReference(url) + + if err != nil { + return err + } + + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + return sl.pool.parseReferences(doc, ref, true) +} + +// Compile loads and compiles a schema +func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { + + ref, err := rootSchema.JsonReference() + + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = sl.pool + d.pool.jsonLoaderFactory = rootSchema.LoaderFactory() + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + } else { + // Load JSON directly + doc, err = rootSchema.LoadJSON() + if err != nil { + return nil, err + } + // References need only be parsed if loading JSON directly + // as pool.GetDocument already does this for us if loading by 
reference + err = sl.pool.parseReferences(doc, ref, true) + if err != nil { + return nil, err + } + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return nil, err + } + } + + draft := sl.Draft + if sl.AutoDetect { + _, detectedDraft, err := parseSchemaURL(doc) + if err != nil { + return nil, err + } + if detectedDraft != nil { + draft = *detectedDraft + } + } + + err = d.parse(doc, draft) + if err != nil { + return nil, err + } + + return &d, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go new file mode 100644 index 00000000..35b1cc63 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -0,0 +1,215 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines resources pooling. +// Eases referencing and avoids downloading the same resource twice. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +type schemaPoolDocument struct { + Document interface{} + Draft *Draft +} + +type schemaPool struct { + schemaPoolDocuments map[string]*schemaPoolDocument + jsonLoaderFactory JSONLoaderFactory + autoDetect *bool +} + +func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { + + var ( + draft *Draft + err error + reference = ref.String() + ) + // Only the root document should be added to the schema pool if pooled is true + if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { + return fmt.Errorf("Reference already exists: \"%s\"", reference) + } + + if *p.autoDetect { + _, draft, err = parseSchemaURL(document) + if err != nil { + return err + } + } + + err = p.parseReferencesRecursive(document, ref, draft) + + if pooled { + p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} + } + + return err +} + +func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { + // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. + // For $ref references it takes into account the $id scope it is in and replaces + // the reference by the absolute resolved reference + + // When encountering errors it fails silently. Error handling is done when the schema + // is syntactically parsed and any error encountered here should also come up there. 
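
As the AddSchemas comment earlier in this file notes, schemas added without an explicit URL are pooled by their $id, and parseReferencesRecursive then rewrites every $ref to its absolute resolved form. A rough sketch of that workflow under the same assumption as before (NewStringLoader as the loader, example.com as a placeholder identifier):

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    sl := gojsonschema.NewSchemaLoader()

    // The helper schema carries an $id, so AddSchemas can pool it and the
    // root schema can point at it with an (already absolute) $ref.
    helper := gojsonschema.NewStringLoader(`{
        "$id": "https://example.com/name.json",
        "type": "string",
        "minLength": 1
    }`)
    if err := sl.AddSchemas(helper); err != nil {
        panic(err)
    }

    root := gojsonschema.NewStringLoader(`{
        "type": "object",
        "properties": {"name": {"$ref": "https://example.com/name.json"}}
    }`)
    schema, err := sl.Compile(root)
    if err != nil {
        panic(err)
    }

    result, err := schema.Validate(gojsonschema.NewStringLoader(`{"name": ""}`))
    if err != nil {
        panic(err)
    }
    fmt.Println("valid:", result.Valid()) // expected false: "name" violates minLength 1
}
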
+ switch m := document.(type) { + case []interface{}: + for _, v := range m { + p.parseReferencesRecursive(v, ref, draft) + } + case map[string]interface{}: + localRef := &ref + + keyID := KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) + if err == nil { + localRef, err = ref.Inherits(jsonReference) + if err == nil { + if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { + return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) + } + p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} + } + } + } + + if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) + if err == nil { + absoluteRef, err := localRef.Inherits(jsonReference) + if err == nil { + m[KEY_REF] = absoluteRef.String() + } + } + } + + for k, v := range m { + // const and enums should be interpreted literally, so ignore them + if k == KEY_CONST || k == KEY_ENUM { + continue + } + // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc + // Therefore don't treat it like a schema. + if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { + if child, ok := v.(map[string]interface{}); ok { + for _, v := range child { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } else { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } + return nil +} + +func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { + + var ( + spd *schemaPoolDocument + draft *Draft + ok bool + err error + ) + + if internalLogEnabled { + internalLog("Get Document ( %s )", reference.String()) + } + + // Create a deep copy, so we can remove the fragment part later on without altering the original + refToURL, _ := gojsonreference.NewJsonReference(reference.String()) + + // First check if the given fragment is a location independent identifier + // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 + + if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { + if internalLogEnabled { + internalLog(" From pool") + } + return spd, nil + } + + // If the given reference is not a location independent identifier, + // strip the fragment and look for a document with it's base URI + + refToURL.GetUrl().Fragment = "" + + if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { + document, _, err := reference.GetPointer().Get(cachedSpd.Document) + + if err != nil { + return nil, err + } + + if internalLogEnabled { + internalLog(" From pool") + } + + spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} + p.schemaPoolDocuments[reference.String()] = spd + + return spd, nil + } + + // It is not possible to load anything remotely that is not canonical... 
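
GetDocument first consults the pool (including location-independent $id fragments) and only falls back to the loader factory for canonical references, as the code just below enforces. A sketch of compiling a schema by reference with the NewReferenceLoader constructor used earlier in this file; the URL is purely a placeholder and must be reachable for the sketch to succeed:

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    // Placeholder location: any canonical http(s) or file reference would do here.
    loader := gojsonschema.NewReferenceLoader("https://example.com/schemas/thing.json")

    schema, err := gojsonschema.NewSchema(loader)
    if err != nil {
        // Unreachable documents and non-canonical references surface here.
        panic(err)
    }

    result, err := schema.Validate(gojsonschema.NewStringLoader(`{"id": 1}`))
    if err != nil {
        panic(err)
    }
    fmt.Println("valid:", result.Valid())
}
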
+ if !reference.IsCanonical() { + return nil, errors.New(formatErrorDescription( + Locale.ReferenceMustBeCanonical(), + ErrorDetails{"reference": reference.String()}, + )) + } + + jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) + document, err := jsonReferenceLoader.LoadJSON() + + if err != nil { + return nil, err + } + + // add the whole document to the pool for potential re-use + p.parseReferences(document, refToURL, true) + + _, draft, _ = parseSchemaURL(document) + + // resolve the potential fragment and also cache it + document, _, err = reference.GetPointer().Get(document) + + if err != nil { + return nil, err + } + + return &schemaPoolDocument{Document: document, Draft: draft}, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go new file mode 100644 index 00000000..6e5e1b5c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go @@ -0,0 +1,68 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Pool of referenced schemas. +// +// created 25-06-2013 + +package gojsonschema + +import ( + "fmt" +) + +type schemaReferencePool struct { + documents map[string]*subSchema +} + +func newSchemaReferencePool() *schemaReferencePool { + + p := &schemaReferencePool{} + p.documents = make(map[string]*subSchema) + + return p +} + +func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + } + + if sch, ok := p.documents[ref]; ok { + if internalLogEnabled { + internalLog(fmt.Sprintf(" From pool")) + } + return sch, true + } + + return nil, false +} + +func (p *schemaReferencePool) Add(ref string, sch *subSchema) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + } + if _, ok := p.documents[ref]; !ok { + p.documents[ref] = sch + } +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go new file mode 100644 index 00000000..36b447a2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go @@ -0,0 +1,83 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Helper structure to handle schema types, and the combination of them. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "strings" +) + +type jsonSchemaType struct { + types []string +} + +// Is the schema typed ? that is containing at least one type +// When not typed, the schema does not need any type validation +func (t *jsonSchemaType) IsTyped() bool { + return len(t.types) > 0 +} + +func (t *jsonSchemaType) Add(etype string) error { + + if !isStringInSlice(JSON_TYPES, etype) { + return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) + } + + if t.Contains(etype) { + return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) + } + + t.types = append(t.types, etype) + + return nil +} + +func (t *jsonSchemaType) Contains(etype string) bool { + + for _, v := range t.types { + if v == etype { + return true + } + } + + return false +} + +func (t *jsonSchemaType) String() string { + + if len(t.types) == 0 { + return STRING_UNDEFINED // should never happen + } + + // Displayed as a list [type1,type2,...] + if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go new file mode 100644 index 00000000..ec779812 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go @@ -0,0 +1,149 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. 
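
jsonSchemaType above stores the union of allowed types for a subschema, so "type" may be given as an array. A small sketch of multi-type validation against this vendored package, again assuming NewStringLoader; the expected outcomes follow from the type checks in the validation hunks later in this patch.

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    // "type" as an array is parsed into a jsonSchemaType with two entries.
    schema := gojsonschema.NewStringLoader(`{"type": ["string", "null"]}`)

    for _, raw := range []string{`"hello"`, `null`, `42`} {
        result, err := gojsonschema.Validate(schema, gojsonschema.NewStringLoader(raw))
        if err != nil {
            panic(err)
        }
        fmt.Printf("%-7s valid=%v\n", raw, result.Valid())
    }
    // Expected: "hello" and null pass, 42 fails the type check.
}
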
+// +// created 27-02-2013 + +package gojsonschema + +import ( + "github.com/xeipuuv/gojsonreference" + "math/big" + "regexp" +) + +// Constants +const ( + KEY_SCHEMA = "$schema" + KEY_ID = "id" + KEY_ID_NEW = "$id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_PROPERTY_NAMES = "propertyNames" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_CONTAINS = "contains" + KEY_CONST = "const" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + KEY_ALL_OF = "allOf" + KEY_NOT = "not" + KEY_IF = "if" + KEY_THEN = "then" + KEY_ELSE = "else" +) + +type subSchema struct { + draft *Draft + + // basic subSchema meta properties + id *gojsonreference.JsonReference + title *string + description *string + + property string + + // Quick pass/fail for boolean schemas + pass *bool + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + + // hierarchy + parent *subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *big.Rat + maximum *big.Rat + exclusiveMaximum *big.Rat + minimum *big.Rat + exclusiveMinimum *big.Rat + + // validation : string + minLength *int + maxLength *int + pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties map[string]*subSchema + propertyNames *subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + contains *subSchema + + additionalItems interface{} + + // validation : all + _const *string //const is a golang keyword + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema + _if *subSchema // if/else are golang keywords + _then *subSchema + _else *subSchema +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go new file mode 100644 index 00000000..0e6fd517 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/types.go @@ -0,0 +1,62 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. +// +// created 28-02-2013 + +package gojsonschema + +// Type constants +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +// JSON_TYPES hosts the list of type that are supported in JSON +var JSON_TYPES []string + +// SCHEMA_TYPES hosts the list of type that are supported in schemas +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go new file mode 100644 index 00000000..a17d22e3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -0,0 +1,197 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" +) + +func isKind(what interface{}, kinds ...reflect.Kind) bool { + target := what + if isJSONNumber(what) { + // JSON Numbers are strings! + target = *mustBeNumber(what) + } + targetKind := reflect.ValueOf(target).Kind() + for _, kind := range kinds { + if targetKind == kind { + return true + } + } + return false +} + +func existsMapKey(m map[string]interface{}, k string) bool { + _, ok := m[k] + return ok +} + +func isStringInSlice(s []string, what string) bool { + for i := range s { + if s[i] == what { + return true + } + } + return false +} + +// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
+func indexStringInSlice(s []string, what string) int { + for i := range s { + if s[i] == what { + return i + } + } + return -1 +} + +func marshalToJSONString(value interface{}) (*string, error) { + + mBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + sBytes := string(mBytes) + return &sBytes, nil +} + +func marshalWithoutNumber(value interface{}) (*string, error) { + + // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber + // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 + // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber + // so that these differences in representation are removed + + jsonString, err := marshalToJSONString(value) + if err != nil { + return nil, err + } + + var document interface{} + + err = json.Unmarshal([]byte(*jsonString), &document) + if err != nil { + return nil, err + } + + return marshalToJSONString(document) +} + +func isJSONNumber(what interface{}) bool { + + switch what.(type) { + + case json.Number: + return true + } + + return false +} + +func checkJSONInteger(what interface{}) (isInt bool) { + + jsonNumber := what.(json.Number) + + bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) + + return isValidNumber && bigFloat.IsInt() + +} + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +func mustBeInteger(what interface{}) *int { + + if isJSONNumber(what) { + + number := what.(json.Number) + + isInt := checkJSONInteger(number) + + if isInt { + + int64Value, err := number.Int64() + if err != nil { + return nil + } + + int32Value := int(int64Value) + return &int32Value + } + + } + + return nil +} + +func mustBeNumber(what interface{}) *big.Rat { + + if isJSONNumber(what) { + number := what.(json.Number) + float64Value, success := new(big.Rat).SetString(string(number)) + if success { + return float64Value + } + } + + return nil + +} + +func convertDocumentNode(val interface{}) interface{} { + + if lval, ok := val.([]interface{}); ok { + + res := []interface{}{} + for _, v := range lval { + res = append(res, convertDocumentNode(v)) + } + + return res + + } + + if mval, ok := val.(map[interface{}]interface{}); ok { + + res := map[string]interface{}{} + + for k, v := range mval { + res[k.(string)] = convertDocumentNode(v) + } + + return res + + } + + return val +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go new file mode 100644 index 00000000..74091bca --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -0,0 +1,858 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
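
The marshalWithoutNumber helper above exists because documents are decoded with Decoder.UseNumber, so 1 and 1.00 arrive as different json.Number strings; re-decoding without UseNumber and re-encoding normalizes them before const/enum comparison. A standalone illustration of that normalization using only the standard library, mirroring (not reusing) the helper:

package main

import (
    "encoding/json"
    "fmt"
)

// remarshal mimics marshalWithoutNumber: encode, decode without UseNumber
// (numbers become float64), then encode again, so that 1 and 1.00 end up
// with the same textual form.
func remarshal(v interface{}) string {
    first, err := json.Marshal(v)
    if err != nil {
        panic(err)
    }
    var plain interface{}
    if err := json.Unmarshal(first, &plain); err != nil {
        panic(err)
    }
    out, err := json.Marshal(plain)
    if err != nil {
        panic(err)
    }
    return string(out)
}

func main() {
    a := json.Number("1")
    b := json.Number("1.00")
    fmt.Println(string(a) == string(b))       // false: the raw json.Number strings differ
    fmt.Println(remarshal(a) == remarshal(b)) // true: both normalize to "1"
}
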
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Extends Schema and subSchema, implements the validation phase. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +// Validate loads and validates a JSON schema +func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { + // load schema + schema, err := NewSchema(ls) + if err != nil { + return nil, err + } + return schema.Validate(ld) +} + +// Validate loads and validates a JSON document +func (v *Schema) Validate(l JSONLoader) (*Result, error) { + root, err := l.LoadJSON() + if err != nil { + return nil, err + } + return v.validateDocument(root), nil +} + +func (v *Schema) validateDocument(root interface{}) *Result { + result := &Result{} + context := NewJsonContext(STRING_CONTEXT_ROOT, nil) + v.rootSchema.validateRecursive(v.rootSchema, root, result, context) + return result +} + +func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { + result := &Result{} + v.validateRecursive(v, document, result, context) + return result +} + +// Walker function to validate the json recursively against the subSchema +func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateRecursive %s", context.String()) + internalLog(" %v", currentNode) + } + + // Handle true/false schema as early as possible as all other fields will be nil + if currentSubSchema.pass != nil { + if !*currentSubSchema.pass { + result.addInternalError( + new(FalseError), + context, + currentNode, + ErrorDetails{}, + ) + } + return + } + + // Handle referenced schemas, returns directly when a $ref is found + if currentSubSchema.refSchema != nil { + v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) + return + } + + // Check for null value + if currentNode == nil { + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_NULL, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) + v.validateCommon(currentSubSchema, currentNode, result, context) + + } else { // Not a null value + + if isJSONNumber(currentNode) { + + value := currentNode.(json.Number) + + isInt := checkJSONInteger(value) + + validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) + + if currentSubSchema.types.IsTyped() && !validType { + + givenType := TYPE_INTEGER + if !isInt { + givenType = TYPE_NUMBER + } + + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": givenType, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } else { + + rValue := reflect.ValueOf(currentNode) + rKind := rValue.Kind() 
+ + switch rKind { + + // Slice => JSON array + + case reflect.Slice: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_ARRAY, + }, + ) + return + } + + castCurrentNode := currentNode.([]interface{}) + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateArray(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + // Map => JSON object + + case reflect.Map: + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_OBJECT, + }, + ) + return + } + + castCurrentNode, ok := currentNode.(map[string]interface{}) + if !ok { + castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + } + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateObject(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + for _, pSchema := range currentSubSchema.propertiesChildren { + nextNode, ok := castCurrentNode[pSchema.property] + if ok { + subContext := NewJsonContext(pSchema.property, context) + v.validateRecursive(pSchema, nextNode, result, subContext) + } + } + + // Simple JSON values : string, number, boolean + + case reflect.Bool: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_BOOLEAN, + }, + ) + return + } + + value := currentNode.(bool) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + case reflect.String: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_STRING, + }, + ) + return + } + + value := currentNode.(string) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } + + } + + } + + result.incrementScore() +} + +// Different kinds of validation there, subSchema / common / array / object / string... 
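
validateRecursive above accepts a json.Number as an "integer" whenever checkJSONInteger reports a whole value, so 2.0 satisfies {"type": "integer"} while 2.5 does not. A quick sketch of that behaviour, under the same NewStringLoader assumption as the earlier sketches:

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    schema := gojsonschema.NewStringLoader(`{"type": "integer"}`)

    for _, raw := range []string{`2`, `2.0`, `2.5`} {
        result, err := gojsonschema.Validate(schema, gojsonschema.NewStringLoader(raw))
        if err != nil {
            panic(err)
        }
        // 2 and 2.0 are whole-valued json.Numbers; 2.5 is not.
        fmt.Printf("%-4s valid=%v\n", raw, result.Valid())
    }
}
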
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addInternalError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + } + } + } + } + } + + if currentSubSchema._if != nil { + validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) + if currentSubSchema._then != nil && validationResultIf.Valid() { + validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) + if !validationResultThen.Valid() { + 
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultThen) + } + } + if currentSubSchema._else != nil && !validationResultIf.Valid() { + validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) + if !validationResultElse.Valid() { + result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultElse) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // const: + if currentSubSchema._const != nil { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if *vString != *currentSubSchema._const { + result.addInternalError(new(ConstError), + context, + value, + ErrorDetails{ + "allowed": *currentSubSchema._const, + }, + ) + } + } + + // enum: + if len(currentSubSchema.enum) > 0 { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !isStringInSlice(currentSubSchema.enum, *vString) { + result.addInternalError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbValues := len(value) + + // TODO explain + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have less schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified. 
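
As the comment just above says, array elements beyond the tuple of item schemas are governed by "additionalItems". A short sketch of tuple validation with additionalItems set to false, assuming NewStringLoader as before; the expected results follow from the validateArray branch that raises ArrayNoAdditionalItemsError.

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    // Tuple form: first item must be a string, second a number,
    // and additionalItems:false forbids anything beyond the tuple.
    schema := gojsonschema.NewStringLoader(`{
        "type": "array",
        "items": [{"type": "string"}, {"type": "number"}],
        "additionalItems": false
    }`)

    for _, raw := range []string{`["a", 1]`, `["a", 1, true]`} {
        result, err := gojsonschema.Validate(schema, gojsonschema.NewStringLoader(raw))
        if err != nil {
            panic(err)
        }
        fmt.Printf("%-16s valid=%v\n", raw, result.Valid())
    }
    // Expected: ["a", 1] passes; ["a", 1, true] fails because of the extra element.
}
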
+ + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbValues < int(*currentSubSchema.minItems) { + result.addInternalError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbValues > int(*currentSubSchema.maxItems) { + result.addInternalError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems = make(map[string]int) + for j, v := range value { + vString, err := marshalWithoutNumber(v) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if i, ok := stringifiedItems[*vString]; ok { + result.addInternalError( + new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, + ) + } + stringifiedItems[*vString] = j + } + } + + // contains: + + if currentSubSchema.contains != nil { + validatedOne := false + var bestValidationResult *Result + + for i, v := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + + validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) + if validationResult.Valid() { + validatedOne = true + break + } else { + if bestValidationResult == nil || validationResult.score > bestValidationResult.score { + bestValidationResult = validationResult + } + } + } + if !validatedOne { + result.addInternalError( + new(ArrayContainsError), + context, + value, + ErrorDetails{}, + ) + if bestValidationResult != nil { + result.mergeErrors(bestValidationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addInternalError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addInternalError( + new(ArrayMaxPropertiesError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addInternalError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty & patternProperty: + for pk := range value { + + // Check whether this property is described by "properties" + found := false + for _, spValue := range 
currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + // Check whether this property is described by "patternProperties" + ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + // If it is not described by neither "properties" nor "patternProperties" it must pass "additionalProperties" + if !found && !ppMatch { + switch ap := currentSubSchema.additionalProperties.(type) { + case bool: + // Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema + if !ap { + result.addInternalError( + new(AdditionalPropertyNotAllowedError), + context, + value[pk], + ErrorDetails{"property": pk}, + ) + + } + case *subSchema: + validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context)) + result.mergeErrors(validationResult) + } + } + } + + // propertyNames: + if currentSubSchema.propertyNames != nil { + for pk := range value { + validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context) + if !validationResult.Valid() { + result.addInternalError(new(InvalidPropertyNameError), + context, + value, ErrorDetails{ + "property": pk, + }) + result.mergeErrors(validationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + validated := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + validated = true + subContext := NewJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, subContext) + result.mergeErrors(validationResult) + } + } + + if !validated { + return false + } + + result.incrementScore() + return true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore JSON numbers + if isJSONNumber(value) { + return + } + + // Ignore non strings + if !isKind(value, reflect.String) { + return + } + + if internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addInternalError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addInternalError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addInternalError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) 
validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore non numbers + if !isJSONNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := value.(json.Number) + float64Value, _ := new(big.Rat).SetString(string(number)) + + // multipleOf: + if currentSubSchema.multipleOf != nil { + if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { + result.addInternalError( + new(MultipleOfError), + context, + number, + ErrorDetails{ + "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), + }, + ) + } + } + + //maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if float64Value.Cmp(currentSubSchema.maximum) == 1 { + result.addInternalError( + new(NumberLTEError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.maximum), + }, + ) + } + } + if currentSubSchema.exclusiveMaximum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { + result.addInternalError( + new(NumberLTError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), + }, + ) + } + } + + //minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if float64Value.Cmp(currentSubSchema.minimum) == -1 { + result.addInternalError( + new(NumberGTEError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.minimum), + }, + ) + } + } + if currentSubSchema.exclusiveMinimum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { + result.addInternalError( + new(NumberGTError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), + }, + ) + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes deleted file mode 100644 index d2f212e5..00000000 --- a/vendor/golang.org/x/net/.gitattributes +++ /dev/null @@ -1,10 +0,0 @@ -# Treat all files in this repo as binary, with no git magic updating -# line endings. Windows users contributing to Go will need to use a -# modern version of git and editors capable of LF line endings. -# -# We'll prevent accidental CRLF line endings from entering the repo -# via the git-review gofmt checks. -# -# See golang.org/issue/9281 - -* -text diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore deleted file mode 100644 index 8339fd61..00000000 --- a/vendor/golang.org/x/net/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Add no patterns to .hgignore except for files generated by the build. -last-change diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md deleted file mode 100644 index 88dff59b..00000000 --- a/vendor/golang.org/x/net/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. 
What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README deleted file mode 100644 index 6b13d8e5..00000000 --- a/vendor/golang.org/x/net/README +++ /dev/null @@ -1,3 +0,0 @@ -This repository holds supplementary Go networking libraries. - -To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go index 3b4fd089..f9dc0e8e 100644 --- a/vendor/golang.org/x/net/bpf/instructions.go +++ b/vendor/golang.org/x/net/bpf/instructions.go @@ -198,7 +198,7 @@ func (a LoadConstant) Assemble() (RawInstruction, error) { return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a LoadConstant) String() string { switch a.Dst { case RegA: @@ -224,7 +224,7 @@ func (a LoadScratch) Assemble() (RawInstruction, error) { return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a LoadScratch) String() string { switch a.Dst { case RegA: @@ -248,7 +248,7 @@ func (a LoadAbsolute) Assemble() (RawInstruction, error) { return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a LoadAbsolute) String() string { switch a.Size { case 1: // byte @@ -277,7 +277,7 @@ func (a LoadIndirect) Assemble() (RawInstruction, error) { return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a LoadIndirect) String() string { switch a.Size { case 1: // byte @@ -306,7 +306,7 @@ func (a LoadMemShift) Assemble() (RawInstruction, error) { return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a LoadMemShift) String() string { return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) } @@ -325,7 +325,7 @@ func (a LoadExtension) Assemble() (RawInstruction, error) { return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a LoadExtension) String() string { switch a.Num { case ExtLen: @@ -392,7 +392,7 @@ func (a StoreScratch) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. 
func (a StoreScratch) String() string { switch a.Src { case RegA: @@ -418,7 +418,7 @@ func (a ALUOpConstant) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a ALUOpConstant) String() string { switch a.Op { case ALUOpAdd: @@ -458,7 +458,7 @@ func (a ALUOpX) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a ALUOpX) String() string { switch a.Op { case ALUOpAdd: @@ -496,7 +496,7 @@ func (a NegateA) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a NegateA) String() string { return fmt.Sprintf("neg") } @@ -514,7 +514,7 @@ func (a Jump) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a Jump) String() string { return fmt.Sprintf("ja %d", a.Skip) } @@ -566,7 +566,7 @@ func (a JumpIf) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a JumpIf) String() string { switch a.Cond { // K == A @@ -621,7 +621,7 @@ func (a RetA) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a RetA) String() string { return fmt.Sprintf("ret a") } @@ -639,7 +639,7 @@ func (a RetConstant) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a RetConstant) String() string { return fmt.Sprintf("ret #%d", a.Val) } @@ -654,7 +654,7 @@ func (a TXA) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a TXA) String() string { return fmt.Sprintf("txa") } @@ -669,7 +669,7 @@ func (a TAX) Assemble() (RawInstruction, error) { }, nil } -// String returns the the instruction in assembler notation. +// String returns the instruction in assembler notation. func (a TAX) String() string { return fmt.Sprintf("tax") } diff --git a/vendor/golang.org/x/net/bpf/instructions_test.go b/vendor/golang.org/x/net/bpf/instructions_test.go deleted file mode 100644 index dde474ab..00000000 --- a/vendor/golang.org/x/net/bpf/instructions_test.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import ( - "fmt" - "io/ioutil" - "reflect" - "strconv" - "strings" - "testing" -) - -// This is a direct translation of the program in -// testdata/all_instructions.txt. 
-var allInstructions = []Instruction{ - LoadConstant{Dst: RegA, Val: 42}, - LoadConstant{Dst: RegX, Val: 42}, - - LoadScratch{Dst: RegA, N: 3}, - LoadScratch{Dst: RegX, N: 3}, - - LoadAbsolute{Off: 42, Size: 1}, - LoadAbsolute{Off: 42, Size: 2}, - LoadAbsolute{Off: 42, Size: 4}, - - LoadIndirect{Off: 42, Size: 1}, - LoadIndirect{Off: 42, Size: 2}, - LoadIndirect{Off: 42, Size: 4}, - - LoadMemShift{Off: 42}, - - LoadExtension{Num: ExtLen}, - LoadExtension{Num: ExtProto}, - LoadExtension{Num: ExtType}, - LoadExtension{Num: ExtRand}, - - StoreScratch{Src: RegA, N: 3}, - StoreScratch{Src: RegX, N: 3}, - - ALUOpConstant{Op: ALUOpAdd, Val: 42}, - ALUOpConstant{Op: ALUOpSub, Val: 42}, - ALUOpConstant{Op: ALUOpMul, Val: 42}, - ALUOpConstant{Op: ALUOpDiv, Val: 42}, - ALUOpConstant{Op: ALUOpOr, Val: 42}, - ALUOpConstant{Op: ALUOpAnd, Val: 42}, - ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, - ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, - ALUOpConstant{Op: ALUOpMod, Val: 42}, - ALUOpConstant{Op: ALUOpXor, Val: 42}, - - ALUOpX{Op: ALUOpAdd}, - ALUOpX{Op: ALUOpSub}, - ALUOpX{Op: ALUOpMul}, - ALUOpX{Op: ALUOpDiv}, - ALUOpX{Op: ALUOpOr}, - ALUOpX{Op: ALUOpAnd}, - ALUOpX{Op: ALUOpShiftLeft}, - ALUOpX{Op: ALUOpShiftRight}, - ALUOpX{Op: ALUOpMod}, - ALUOpX{Op: ALUOpXor}, - - NegateA{}, - - Jump{Skip: 10}, - JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, - JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, - JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, - JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, - JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, - JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, - JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, - - TAX{}, - TXA{}, - - RetA{}, - RetConstant{Val: 42}, -} -var allInstructionsExpected = "testdata/all_instructions.bpf" - -// Check that we produce the same output as the canonical bpf_asm -// linux kernel tool. -func TestInterop(t *testing.T) { - out, err := Assemble(allInstructions) - if err != nil { - t.Fatalf("assembly of allInstructions program failed: %s", err) - } - t.Logf("Assembled program is %d instructions long", len(out)) - - bs, err := ioutil.ReadFile(allInstructionsExpected) - if err != nil { - t.Fatalf("reading %s: %s", allInstructionsExpected, err) - } - // First statement is the number of statements, last statement is - // empty. We just ignore both and rely on slice length. 
- stmts := strings.Split(string(bs), ",") - if len(stmts)-2 != len(out) { - t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions)) - } - - for i, stmt := range stmts[1 : len(stmts)-2] { - nums := strings.Split(stmt, " ") - if len(nums) != 4 { - t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt) - } - - actual := out[i] - - op, err := strconv.ParseUint(nums[0], 10, 16) - if err != nil { - t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected) - } - if actual.Op != uint16(op) { - t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op) - } - - jt, err := strconv.ParseUint(nums[1], 10, 8) - if err != nil { - t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected) - } - if actual.Jt != uint8(jt) { - t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, jt) - } - - jf, err := strconv.ParseUint(nums[2], 10, 8) - if err != nil { - t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected) - } - if actual.Jf != uint8(jf) { - t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf) - } - - k, err := strconv.ParseUint(nums[3], 10, 32) - if err != nil { - t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected) - } - if actual.K != uint32(k) { - t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k) - } - } -} - -// Check that assembly and disassembly match each other. -func TestAsmDisasm(t *testing.T) { - prog1, err := Assemble(allInstructions) - if err != nil { - t.Fatalf("assembly of allInstructions program failed: %s", err) - } - t.Logf("Assembled program is %d instructions long", len(prog1)) - - got, allDecoded := Disassemble(prog1) - if !allDecoded { - t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:") - for i, inst := range got { - if r, ok := inst.(RawInstruction); ok { - t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r) - } - } - } - - if len(allInstructions) != len(got) { - t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(allInstructions), len(got)) - } - if !reflect.DeepEqual(allInstructions, got) { - t.Errorf("program mutated by disassembly:") - for i := range got { - if !reflect.DeepEqual(allInstructions[i], got[i]) { - t.Logf(" insn %d, s: %#v, p1: %#v, got: %#v", i+1, allInstructions[i], prog1[i], got[i]) - } - } - } -} - -type InvalidInstruction struct{} - -func (a InvalidInstruction) Assemble() (RawInstruction, error) { - return RawInstruction{}, fmt.Errorf("Invalid Instruction") -} - -func (a InvalidInstruction) String() string { - return fmt.Sprintf("unknown instruction: %#v", a) -} - -func TestString(t *testing.T) { - testCases := []struct { - instruction Instruction - assembler string - }{ - { - instruction: LoadConstant{Dst: RegA, Val: 42}, - assembler: "ld #42", - }, - { - instruction: LoadConstant{Dst: RegX, Val: 42}, - assembler: "ldx #42", - }, - { - instruction: LoadConstant{Dst: 0xffff, Val: 42}, - assembler: "unknown instruction: bpf.LoadConstant{Dst:0xffff, Val:0x2a}", - }, - { - instruction: LoadScratch{Dst: RegA, N: 3}, - assembler: "ld M[3]", - }, - { - instruction: LoadScratch{Dst: RegX, N: 3}, - assembler: 
"ldx M[3]", - }, - { - instruction: LoadScratch{Dst: 0xffff, N: 3}, - assembler: "unknown instruction: bpf.LoadScratch{Dst:0xffff, N:3}", - }, - { - instruction: LoadAbsolute{Off: 42, Size: 1}, - assembler: "ldb [42]", - }, - { - instruction: LoadAbsolute{Off: 42, Size: 2}, - assembler: "ldh [42]", - }, - { - instruction: LoadAbsolute{Off: 42, Size: 4}, - assembler: "ld [42]", - }, - { - instruction: LoadAbsolute{Off: 42, Size: -1}, - assembler: "unknown instruction: bpf.LoadAbsolute{Off:0x2a, Size:-1}", - }, - { - instruction: LoadIndirect{Off: 42, Size: 1}, - assembler: "ldb [x + 42]", - }, - { - instruction: LoadIndirect{Off: 42, Size: 2}, - assembler: "ldh [x + 42]", - }, - { - instruction: LoadIndirect{Off: 42, Size: 4}, - assembler: "ld [x + 42]", - }, - { - instruction: LoadIndirect{Off: 42, Size: -1}, - assembler: "unknown instruction: bpf.LoadIndirect{Off:0x2a, Size:-1}", - }, - { - instruction: LoadMemShift{Off: 42}, - assembler: "ldx 4*([42]&0xf)", - }, - { - instruction: LoadExtension{Num: ExtLen}, - assembler: "ld #len", - }, - { - instruction: LoadExtension{Num: ExtProto}, - assembler: "ld #proto", - }, - { - instruction: LoadExtension{Num: ExtType}, - assembler: "ld #type", - }, - { - instruction: LoadExtension{Num: ExtPayloadOffset}, - assembler: "ld #poff", - }, - { - instruction: LoadExtension{Num: ExtInterfaceIndex}, - assembler: "ld #ifidx", - }, - { - instruction: LoadExtension{Num: ExtNetlinkAttr}, - assembler: "ld #nla", - }, - { - instruction: LoadExtension{Num: ExtNetlinkAttrNested}, - assembler: "ld #nlan", - }, - { - instruction: LoadExtension{Num: ExtMark}, - assembler: "ld #mark", - }, - { - instruction: LoadExtension{Num: ExtQueue}, - assembler: "ld #queue", - }, - { - instruction: LoadExtension{Num: ExtLinkLayerType}, - assembler: "ld #hatype", - }, - { - instruction: LoadExtension{Num: ExtRXHash}, - assembler: "ld #rxhash", - }, - { - instruction: LoadExtension{Num: ExtCPUID}, - assembler: "ld #cpu", - }, - { - instruction: LoadExtension{Num: ExtVLANTag}, - assembler: "ld #vlan_tci", - }, - { - instruction: LoadExtension{Num: ExtVLANTagPresent}, - assembler: "ld #vlan_avail", - }, - { - instruction: LoadExtension{Num: ExtVLANProto}, - assembler: "ld #vlan_tpid", - }, - { - instruction: LoadExtension{Num: ExtRand}, - assembler: "ld #rand", - }, - { - instruction: LoadAbsolute{Off: 0xfffff038, Size: 4}, - assembler: "ld #rand", - }, - { - instruction: LoadExtension{Num: 0xfff}, - assembler: "unknown instruction: bpf.LoadExtension{Num:4095}", - }, - { - instruction: StoreScratch{Src: RegA, N: 3}, - assembler: "st M[3]", - }, - { - instruction: StoreScratch{Src: RegX, N: 3}, - assembler: "stx M[3]", - }, - { - instruction: StoreScratch{Src: 0xffff, N: 3}, - assembler: "unknown instruction: bpf.StoreScratch{Src:0xffff, N:3}", - }, - { - instruction: ALUOpConstant{Op: ALUOpAdd, Val: 42}, - assembler: "add #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpSub, Val: 42}, - assembler: "sub #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpMul, Val: 42}, - assembler: "mul #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpDiv, Val: 42}, - assembler: "div #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpOr, Val: 42}, - assembler: "or #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpAnd, Val: 42}, - assembler: "and #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, - assembler: "lsh #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, - assembler: "rsh #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpMod, 
Val: 42}, - assembler: "mod #42", - }, - { - instruction: ALUOpConstant{Op: ALUOpXor, Val: 42}, - assembler: "xor #42", - }, - { - instruction: ALUOpConstant{Op: 0xffff, Val: 42}, - assembler: "unknown instruction: bpf.ALUOpConstant{Op:0xffff, Val:0x2a}", - }, - { - instruction: ALUOpX{Op: ALUOpAdd}, - assembler: "add x", - }, - { - instruction: ALUOpX{Op: ALUOpSub}, - assembler: "sub x", - }, - { - instruction: ALUOpX{Op: ALUOpMul}, - assembler: "mul x", - }, - { - instruction: ALUOpX{Op: ALUOpDiv}, - assembler: "div x", - }, - { - instruction: ALUOpX{Op: ALUOpOr}, - assembler: "or x", - }, - { - instruction: ALUOpX{Op: ALUOpAnd}, - assembler: "and x", - }, - { - instruction: ALUOpX{Op: ALUOpShiftLeft}, - assembler: "lsh x", - }, - { - instruction: ALUOpX{Op: ALUOpShiftRight}, - assembler: "rsh x", - }, - { - instruction: ALUOpX{Op: ALUOpMod}, - assembler: "mod x", - }, - { - instruction: ALUOpX{Op: ALUOpXor}, - assembler: "xor x", - }, - { - instruction: ALUOpX{Op: 0xffff}, - assembler: "unknown instruction: bpf.ALUOpX{Op:0xffff}", - }, - { - instruction: NegateA{}, - assembler: "neg", - }, - { - instruction: Jump{Skip: 10}, - assembler: "ja 10", - }, - { - instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, - assembler: "jeq #42,8,9", - }, - { - instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8}, - assembler: "jeq #42,8", - }, - { - instruction: JumpIf{Cond: JumpEqual, Val: 42, SkipFalse: 8}, - assembler: "jneq #42,8", - }, - { - instruction: JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, - assembler: "jneq #42,8", - }, - { - instruction: JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, - assembler: "jlt #42,7", - }, - { - instruction: JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6}, - assembler: "jle #42,6", - }, - { - instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, - assembler: "jgt #42,4,5", - }, - { - instruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4}, - assembler: "jgt #42,4", - }, - { - instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, - assembler: "jge #42,3,4", - }, - { - instruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3}, - assembler: "jge #42,3", - }, - { - instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, - assembler: "jset #42,2,3", - }, - { - instruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2}, - assembler: "jset #42,2", - }, - { - instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, - assembler: "jset #42,3,2", - }, - { - instruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2}, - assembler: "jset #42,0,2", - }, - { - instruction: JumpIf{Cond: 0xffff, Val: 42, SkipTrue: 1, SkipFalse: 2}, - assembler: "unknown instruction: bpf.JumpIf{Cond:0xffff, Val:0x2a, SkipTrue:0x1, SkipFalse:0x2}", - }, - { - instruction: TAX{}, - assembler: "tax", - }, - { - instruction: TXA{}, - assembler: "txa", - }, - { - instruction: RetA{}, - assembler: "ret a", - }, - { - instruction: RetConstant{Val: 42}, - assembler: "ret #42", - }, - // Invalid instruction - { - instruction: InvalidInstruction{}, - assembler: "unknown instruction: bpf.InvalidInstruction{}", - }, - } - - for _, testCase := range testCases { - if input, ok := testCase.instruction.(fmt.Stringer); ok { - got := input.String() - if got != testCase.assembler { - t.Errorf("String did not return expected assembler notation, expected: %s, got: %s", testCase.assembler, got) - } - } else { - t.Errorf("Instruction %#v is not a 
fmt.Stringer", testCase.instruction) - } - } -} diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf b/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf deleted file mode 100644 index f8714406..00000000 --- a/vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf +++ /dev/null @@ -1 +0,0 @@ -50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0, diff --git a/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt b/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt deleted file mode 100644 index 30455015..00000000 --- a/vendor/golang.org/x/net/bpf/testdata/all_instructions.txt +++ /dev/null @@ -1,79 +0,0 @@ -# This filter is compiled to all_instructions.bpf by the `bpf_asm` -# tool, which can be found in the linux kernel source tree under -# tools/net. - -# Load immediate -ld #42 -ldx #42 - -# Load scratch -ld M[3] -ldx M[3] - -# Load absolute -ldb [42] -ldh [42] -ld [42] - -# Load indirect -ldb [x + 42] -ldh [x + 42] -ld [x + 42] - -# Load IPv4 header length -ldx 4*([42]&0xf) - -# Run extension function -ld #len -ld #proto -ld #type -ld #rand - -# Store scratch -st M[3] -stx M[3] - -# A constant -add #42 -sub #42 -mul #42 -div #42 -or #42 -and #42 -lsh #42 -rsh #42 -mod #42 -xor #42 - -# A X -add x -sub x -mul x -div x -or x -and x -lsh x -rsh x -mod x -xor x - -# !A -neg - -# Jumps -ja end -jeq #42,prev,end -jne #42,end -jlt #42,end -jle #42,end -jgt #42,prev,end -jge #42,prev,end -jset #42,prev,end - -# Register transfers -tax -txa - -# Returns -prev: ret a -end: ret #42 diff --git a/vendor/golang.org/x/net/bpf/vm_aluop_test.go b/vendor/golang.org/x/net/bpf/vm_aluop_test.go deleted file mode 100644 index 16678244..00000000 --- a/vendor/golang.org/x/net/bpf/vm_aluop_test.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bpf_test - -import ( - "testing" - - "golang.org/x/net/bpf" -) - -func TestVMALUOpAdd(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpAdd, - Val: 3, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 8, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 3, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpSub(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.TAX{}, - bpf.ALUOpX{ - Op: bpf.ALUOpSub, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 1, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpMul(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpMul, - Val: 2, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 6, 2, 3, 4, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 4, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpDiv(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpDiv, - Val: 2, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 20, 2, 3, 4, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 2, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpDivByZeroALUOpConstant(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.ALUOpConstant{ - Op: bpf.ALUOpDiv, - Val: 0, - }, - bpf.RetA{}, - }) - if errStr(err) != "cannot divide by zero using ALUOpConstant" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMALUOpDivByZeroALUOpX(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - // Load byte 0 into X - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.TAX{}, - // Load byte 1 into A - bpf.LoadAbsolute{ - Off: 9, - Size: 1, - }, - // Attempt to perform 1/0 - bpf.ALUOpX{ - Op: bpf.ALUOpDiv, - }, - // Return 4 bytes if program does not terminate - bpf.LoadConstant{ - Val: 12, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 3, 4, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 0, out; want != got { - 
t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpOr(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 2, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpOr, - Val: 0x01, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0x00, 0x10, 0x03, 0x04, - 0x05, 0x06, 0x07, 0x08, - 0x09, 0xff, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 9, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpAnd(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 2, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpAnd, - Val: 0x0019, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xaa, 0x09, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpShiftLeft(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpShiftLeft, - Val: 0x01, - }, - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 0x02, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0x01, 0xaa, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpShiftRight(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpShiftRight, - Val: 0x01, - }, - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 0x04, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0x08, 0xff, 0xff, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpMod(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpMod, - Val: 20, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 30, 0, 0, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 2, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpModByZeroALUOpConstant(t *testing.T) 
{ - _, _, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpMod, - Val: 0, - }, - bpf.RetA{}, - }) - if errStr(err) != "cannot divide by zero using ALUOpConstant" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMALUOpModByZeroALUOpX(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - // Load byte 0 into X - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.TAX{}, - // Load byte 1 into A - bpf.LoadAbsolute{ - Off: 9, - Size: 1, - }, - // Attempt to perform 1%0 - bpf.ALUOpX{ - Op: bpf.ALUOpMod, - }, - // Return 4 bytes if program does not terminate - bpf.LoadConstant{ - Val: 12, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 3, 4, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpXor(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpXor, - Val: 0x0a, - }, - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 0x01, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0x0b, 0x00, 0x00, 0x00, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMALUOpUnknown(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.ALUOpConstant{ - Op: bpf.ALUOpAdd, - Val: 1, - }, - // Verify that an unknown operation is a no-op - bpf.ALUOpConstant{ - Op: 100, - }, - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 0x02, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 1, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} diff --git a/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/golang.org/x/net/bpf/vm_bpf_test.go deleted file mode 100644 index 77fa8fe4..00000000 --- a/vendor/golang.org/x/net/bpf/vm_bpf_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf_test - -import ( - "net" - "runtime" - "testing" - "time" - - "golang.org/x/net/bpf" - "golang.org/x/net/ipv4" -) - -// A virtualMachine is a BPF virtual machine which can process an -// input packet against a BPF program and render a verdict. -type virtualMachine interface { - Run(in []byte) (int, error) -} - -// canUseOSVM indicates if the OS BPF VM is available on this platform. 
-func canUseOSVM() bool { - // OS BPF VM can only be used on platforms where x/net/ipv4 supports - // attaching a BPF program to a socket. - switch runtime.GOOS { - case "linux": - return true - } - - return false -} - -// All BPF tests against both the Go VM and OS VM are assumed to -// be used with a UDP socket. As a result, the entire contents -// of a UDP datagram is sent through the BPF program, but only -// the body after the UDP header will ever be returned in output. - -// testVM sets up a Go BPF VM, and if available, a native OS BPF VM -// for integration testing. -func testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) { - goVM, err := bpf.NewVM(filter) - if err != nil { - // Some tests expect an error, so this error must be returned - // instead of fatally exiting the test - return nil, nil, err - } - - mvm := &multiVirtualMachine{ - goVM: goVM, - - t: t, - } - - // If available, add the OS VM for tests which verify that both the Go - // VM and OS VM have exactly the same output for the same input program - // and packet. - done := func() {} - if canUseOSVM() { - osVM, osVMDone := testOSVM(t, filter) - done = func() { osVMDone() } - mvm.osVM = osVM - } - - return mvm, done, nil -} - -// udpHeaderLen is the length of a UDP header. -const udpHeaderLen = 8 - -// A multiVirtualMachine is a virtualMachine which can call out to both the Go VM -// and the native OS VM, if the OS VM is available. -type multiVirtualMachine struct { - goVM virtualMachine - osVM virtualMachine - - t *testing.T -} - -func (mvm *multiVirtualMachine) Run(in []byte) (int, error) { - if len(in) < udpHeaderLen { - mvm.t.Fatalf("input must be at least length of UDP header (%d), got: %d", - udpHeaderLen, len(in)) - } - - // All tests have a UDP header as part of input, because the OS VM - // packets always will. For the Go VM, this output is trimmed before - // being sent back to tests. - goOut, goErr := mvm.goVM.Run(in) - if goOut >= udpHeaderLen { - goOut -= udpHeaderLen - } - - // If Go output is larger than the size of the packet, packet filtering - // interop tests must trim the output bytes to the length of the packet. - // The BPF VM should not do this on its own, as other uses of it do - // not trim the output byte count. - trim := len(in) - udpHeaderLen - if goOut > trim { - goOut = trim - } - - // When the OS VM is not available, process using the Go VM alone - if mvm.osVM == nil { - return goOut, goErr - } - - // The OS VM will apply its own UDP header, so remove the pseudo header - // that the Go VM needs. - osOut, err := mvm.osVM.Run(in[udpHeaderLen:]) - if err != nil { - mvm.t.Fatalf("error while running OS VM: %v", err) - } - - // Verify both VMs return same number of bytes - var mismatch bool - if goOut != osOut { - mismatch = true - mvm.t.Logf("output byte count does not match:\n- go: %v\n- os: %v", goOut, osOut) - } - - if mismatch { - mvm.t.Fatal("Go BPF and OS BPF packet outputs do not match") - } - - return goOut, goErr -} - -// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for -// processing BPF programs. -type osVirtualMachine struct { - l net.PacketConn - s net.Conn -} - -// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting -// packets into a UDP listener with a BPF program attached to it. 
-func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { - l, err := net.ListenPacket("udp4", "127.0.0.1:0") - if err != nil { - t.Fatalf("failed to open OS VM UDP listener: %v", err) - } - - prog, err := bpf.Assemble(filter) - if err != nil { - t.Fatalf("failed to compile BPF program: %v", err) - } - - p := ipv4.NewPacketConn(l) - if err = p.SetBPF(prog); err != nil { - t.Fatalf("failed to attach BPF program to listener: %v", err) - } - - s, err := net.Dial("udp4", l.LocalAddr().String()) - if err != nil { - t.Fatalf("failed to dial connection to listener: %v", err) - } - - done := func() { - _ = s.Close() - _ = l.Close() - } - - return &osVirtualMachine{ - l: l, - s: s, - }, done -} - -// Run sends the input bytes into the OS's BPF VM and returns its verdict. -func (vm *osVirtualMachine) Run(in []byte) (int, error) { - go func() { - _, _ = vm.s.Write(in) - }() - - vm.l.SetDeadline(time.Now().Add(50 * time.Millisecond)) - - var b [512]byte - n, _, err := vm.l.ReadFrom(b[:]) - if err != nil { - // A timeout indicates that BPF filtered out the packet, and thus, - // no input should be returned. - if nerr, ok := err.(net.Error); ok && nerr.Timeout() { - return n, nil - } - - return n, err - } - - return n, nil -} diff --git a/vendor/golang.org/x/net/bpf/vm_extension_test.go b/vendor/golang.org/x/net/bpf/vm_extension_test.go deleted file mode 100644 index 7a48c82f..00000000 --- a/vendor/golang.org/x/net/bpf/vm_extension_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf_test - -import ( - "testing" - - "golang.org/x/net/bpf" -) - -func TestVMLoadExtensionNotImplemented(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.LoadExtension{ - Num: 100, - }, - bpf.RetA{}, - }) - if errStr(err) != "extension 100 not implemented" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMLoadExtensionExtLen(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadExtension{ - Num: bpf.ExtLen, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 4, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} diff --git a/vendor/golang.org/x/net/bpf/vm_jump_test.go b/vendor/golang.org/x/net/bpf/vm_jump_test.go deleted file mode 100644 index e0a3a988..00000000 --- a/vendor/golang.org/x/net/bpf/vm_jump_test.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bpf_test - -import ( - "testing" - - "golang.org/x/net/bpf" -) - -func TestVMJumpOne(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.Jump{ - Skip: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 1, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpOutOfProgram(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.Jump{ - Skip: 1, - }, - bpf.RetA{}, - }) - if errStr(err) != "cannot jump 1 instructions; jumping past program bounds" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMJumpIfTrueOutOfProgram(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.JumpIf{ - Cond: bpf.JumpEqual, - SkipTrue: 2, - }, - bpf.RetA{}, - }) - if errStr(err) != "cannot jump 2 instructions in true case; jumping past program bounds" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMJumpIfFalseOutOfProgram(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.JumpIf{ - Cond: bpf.JumpEqual, - SkipFalse: 3, - }, - bpf.RetA{}, - }) - if errStr(err) != "cannot jump 3 instructions in false case; jumping past program bounds" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMJumpIfEqual(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 1, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 1, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpIfNotEqual(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.JumpIf{ - Cond: bpf.JumpNotEqual, - Val: 1, - SkipFalse: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 1, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpIfGreaterThan(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 4, - }, - bpf.JumpIf{ - Cond: bpf.JumpGreaterThan, - Val: 0x00010202, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 12, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 4, out; want != 
got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpIfLessThan(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 4, - }, - bpf.JumpIf{ - Cond: bpf.JumpLessThan, - Val: 0xff010203, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 12, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 4, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpIfGreaterOrEqual(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 4, - }, - bpf.JumpIf{ - Cond: bpf.JumpGreaterOrEqual, - Val: 0x00010203, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 12, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 4, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpIfLessOrEqual(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 4, - }, - bpf.JumpIf{ - Cond: bpf.JumpLessOrEqual, - Val: 0xff010203, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 12, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 4, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpIfBitsSet(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 2, - }, - bpf.JumpIf{ - Cond: bpf.JumpBitsSet, - Val: 0x1122, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 10, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0x01, 0x02, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 2, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMJumpIfBitsNotSet(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 2, - }, - bpf.JumpIf{ - Cond: bpf.JumpBitsNotSet, - Val: 0x1221, - SkipTrue: 1, - }, - bpf.RetConstant{ - Val: 0, - }, - bpf.RetConstant{ - Val: 10, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0x01, 0x02, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 2, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- 
want: %d\n- got: %d", - want, got) - } -} diff --git a/vendor/golang.org/x/net/bpf/vm_load_test.go b/vendor/golang.org/x/net/bpf/vm_load_test.go deleted file mode 100644 index 04578b66..00000000 --- a/vendor/golang.org/x/net/bpf/vm_load_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf_test - -import ( - "net" - "testing" - - "golang.org/x/net/bpf" - "golang.org/x/net/ipv4" -) - -func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 100, - Size: 2, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 2, 3, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 2, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMLoadAbsoluteBadInstructionSize(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Size: 5, - }, - bpf.RetA{}, - }) - if errStr(err) != "assembling instruction 1: invalid load byte length 0" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMLoadConstantOK(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadConstant{ - Dst: bpf.RegX, - Val: 9, - }, - bpf.TXA{}, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMLoadIndirectOutOfBounds(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadIndirect{ - Off: 100, - Size: 1, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMLoadMemShiftOutOfBounds(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadMemShift{ - Off: 100, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if 
want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -const ( - dhcp4Port = 53 -) - -func TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) { - vm, in, done := testDHCPv4(t) - defer done() - - // Append mostly empty UDP header with incorrect DHCPv4 port - in = append(in, []byte{ - 0, 0, - 0, dhcp4Port + 1, - 0, 0, - 0, 0, - }...) - - out, err := vm.Run(in) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMLoadMemShiftLoadIndirectOK(t *testing.T) { - vm, in, done := testDHCPv4(t) - defer done() - - // Append mostly empty UDP header with correct DHCPv4 port - in = append(in, []byte{ - 0, 0, - 0, dhcp4Port, - 0, 0, - 0, 0, - }...) - - out, err := vm.Run(in) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := len(in)-8, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) { - // DHCPv4 test data courtesy of David Anderson: - // https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70 - vm, done, err := testVM(t, []bpf.Instruction{ - // Load IPv4 packet length - bpf.LoadMemShift{Off: 8}, - // Get UDP dport - bpf.LoadIndirect{Off: 8 + 2, Size: 2}, - // Correct dport? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1}, - // Accept - bpf.RetConstant{Val: 1500}, - // Ignore - bpf.RetConstant{Val: 0}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - - // Minimal requirements to make a valid IPv4 header - h := &ipv4.Header{ - Len: ipv4.HeaderLen, - Src: net.IPv4(192, 168, 1, 1), - Dst: net.IPv4(192, 168, 1, 2), - } - hb, err := h.Marshal() - if err != nil { - t.Fatalf("failed to marshal IPv4 header: %v", err) - } - - hb = append([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - }, hb...) - - return vm, hb, done -} diff --git a/vendor/golang.org/x/net/bpf/vm_ret_test.go b/vendor/golang.org/x/net/bpf/vm_ret_test.go deleted file mode 100644 index 2d86eae3..00000000 --- a/vendor/golang.org/x/net/bpf/vm_ret_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bpf_test - -import ( - "testing" - - "golang.org/x/net/bpf" -) - -func TestVMRetA(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 9, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMRetALargerThanInput(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadAbsolute{ - Off: 8, - Size: 2, - }, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 255, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 2, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMRetConstant(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.RetConstant{ - Val: 9, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 1, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMRetConstantLargerThanInput(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.RetConstant{ - Val: 16, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 2, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} diff --git a/vendor/golang.org/x/net/bpf/vm_scratch_test.go b/vendor/golang.org/x/net/bpf/vm_scratch_test.go deleted file mode 100644 index e600e3c2..00000000 --- a/vendor/golang.org/x/net/bpf/vm_scratch_test.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bpf_test - -import ( - "testing" - - "golang.org/x/net/bpf" -) - -func TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.StoreScratch{ - Src: bpf.RegA, - N: -1, - }, - bpf.RetA{}, - }) - if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.StoreScratch{ - Src: bpf.RegA, - N: 16, - }, - bpf.RetA{}, - }) - if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMStoreScratchUnknownSourceRegister(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.StoreScratch{ - Src: 100, - N: 0, - }, - bpf.RetA{}, - }) - if errStr(err) != "assembling instruction 1: invalid source register 100" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.LoadScratch{ - Dst: bpf.RegX, - N: -1, - }, - bpf.RetA{}, - }) - if errStr(err) != "assembling instruction 1: invalid scratch slot -1" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.LoadScratch{ - Dst: bpf.RegX, - N: 16, - }, - bpf.RetA{}, - }) - if errStr(err) != "assembling instruction 1: invalid scratch slot 16" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMLoadScratchUnknownDestinationRegister(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.LoadScratch{ - Dst: 100, - N: 0, - }, - bpf.RetA{}, - }) - if errStr(err) != "assembling instruction 1: invalid target register 100" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMStoreScratchLoadScratchOneValue(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - // Load byte 255 - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - // Copy to X and store in scratch[0] - bpf.TAX{}, - bpf.StoreScratch{ - Src: bpf.RegX, - N: 0, - }, - // Load byte 1 - bpf.LoadAbsolute{ - Off: 9, - Size: 1, - }, - // Overwrite 1 with 255 from scratch[0] - bpf.LoadScratch{ - Dst: bpf.RegA, - N: 0, - }, - // Return 255 - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 255, 1, 2, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 3, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} - -func TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - // Load byte 10 - bpf.LoadAbsolute{ - Off: 8, - Size: 1, - }, - // Store in scratch[0] - bpf.StoreScratch{ - Src: bpf.RegA, - N: 0, - }, - // Load byte 20 - bpf.LoadAbsolute{ - Off: 9, - Size: 1, - }, - // Store in scratch[1] - bpf.StoreScratch{ - Src: bpf.RegA, - N: 1, - }, - // Load byte 30 - bpf.LoadAbsolute{ - Off: 10, - Size: 1, - }, - // Store in scratch[2] - bpf.StoreScratch{ - Src: bpf.RegA, - N: 2, - }, - // Load byte 1 - bpf.LoadAbsolute{ - Off: 11, - Size: 1, - }, - // Store in scratch[3] - bpf.StoreScratch{ - Src: bpf.RegA, - N: 3, - }, - // Load in byte 10 to X - bpf.LoadScratch{ - Dst: bpf.RegX, - N: 0, - }, - // Copy X -> A - bpf.TXA{}, - // 
Verify value is 10 - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 10, - SkipTrue: 1, - }, - // Fail test if incorrect - bpf.RetConstant{ - Val: 0, - }, - // Load in byte 20 to A - bpf.LoadScratch{ - Dst: bpf.RegA, - N: 1, - }, - // Verify value is 20 - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 20, - SkipTrue: 1, - }, - // Fail test if incorrect - bpf.RetConstant{ - Val: 0, - }, - // Load in byte 30 to A - bpf.LoadScratch{ - Dst: bpf.RegA, - N: 2, - }, - // Verify value is 30 - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: 30, - SkipTrue: 1, - }, - // Fail test if incorrect - bpf.RetConstant{ - Val: 0, - }, - // Return first two bytes on success - bpf.RetConstant{ - Val: 10, - }, - }) - if err != nil { - t.Fatalf("failed to load BPF program: %v", err) - } - defer done() - - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 10, 20, 30, 1, - }) - if err != nil { - t.Fatalf("unexpected error while running program: %v", err) - } - if want, got := 2, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", - want, got) - } -} diff --git a/vendor/golang.org/x/net/bpf/vm_test.go b/vendor/golang.org/x/net/bpf/vm_test.go deleted file mode 100644 index 6bd4dd5c..00000000 --- a/vendor/golang.org/x/net/bpf/vm_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf_test - -import ( - "fmt" - "testing" - - "golang.org/x/net/bpf" -) - -var _ bpf.Instruction = unknown{} - -type unknown struct{} - -func (unknown) Assemble() (bpf.RawInstruction, error) { - return bpf.RawInstruction{}, nil -} - -func TestVMUnknownInstruction(t *testing.T) { - vm, done, err := testVM(t, []bpf.Instruction{ - bpf.LoadConstant{ - Dst: bpf.RegA, - Val: 100, - }, - // Should terminate the program with an error immediately - unknown{}, - bpf.RetA{}, - }) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - defer done() - - _, err = vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, - }) - if errStr(err) != "unknown Instruction at index 1: bpf_test.unknown" { - t.Fatalf("unexpected error while running program: %v", err) - } -} - -func TestVMNoReturnInstruction(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{ - bpf.LoadConstant{ - Dst: bpf.RegA, - Val: 1, - }, - }) - if errStr(err) != "BPF program must end with RetA or RetConstant" { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestVMNoInputInstructions(t *testing.T) { - _, _, err := testVM(t, []bpf.Instruction{}) - if errStr(err) != "one or more Instructions must be specified" { - t.Fatalf("unexpected error: %v", err) - } -} - -// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame -// as input and checking its EtherType to determine if it should be accepted. -func ExampleNewVM() { - // Offset | Length | Comment - // ------------------------- - // 00 | 06 | Ethernet destination MAC address - // 06 | 06 | Ethernet source MAC address - // 12 | 02 | Ethernet EtherType - const ( - etOff = 12 - etLen = 2 - - etARP = 0x0806 - ) - - // Set up a VM to filter traffic based on if its EtherType - // matches the ARP EtherType. 
- vm, err := bpf.NewVM([]bpf.Instruction{ - // Load EtherType value from Ethernet header - bpf.LoadAbsolute{ - Off: etOff, - Size: etLen, - }, - // If EtherType is equal to the ARP EtherType, jump to allow - // packet to be accepted - bpf.JumpIf{ - Cond: bpf.JumpEqual, - Val: etARP, - SkipTrue: 1, - }, - // EtherType does not match the ARP EtherType - bpf.RetConstant{ - Val: 0, - }, - // EtherType matches the ARP EtherType, accept up to 1500 - // bytes of packet - bpf.RetConstant{ - Val: 1500, - }, - }) - if err != nil { - panic(fmt.Sprintf("failed to load BPF program: %v", err)) - } - - // Create an Ethernet frame with the ARP EtherType for testing - frame := []byte{ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, - 0x08, 0x06, - // Payload omitted for brevity - } - - // Run our VM's BPF program using the Ethernet frame as input - out, err := vm.Run(frame) - if err != nil { - panic(fmt.Sprintf("failed to accept Ethernet frame: %v", err)) - } - - // BPF VM can return a byte count greater than the number of input - // bytes, so trim the output to match the input byte length - if out > len(frame) { - out = len(frame) - } - - fmt.Printf("out: %d bytes", out) - - // Output: - // out: 14 bytes -} - -// errStr returns the string representation of an error, or -// "" if it is nil. -func errStr(err error) string { - if err == nil { - return "" - } - - return err.Error() -} diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg deleted file mode 100644 index 3f8b14b6..00000000 --- a/vendor/golang.org/x/net/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index d3681ab4..00000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. 
-package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go deleted file mode 100644 index 62844131..00000000 --- a/vendor/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. -type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := 
<-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. - pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. - check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. - select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. 
- select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 2*timeUnit, t) - - c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) - o := otherContext{c} - testDeadline(o, 2*timeUnit, t) - - c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) - testDeadline(c, 2*timeUnit, t) -} - -func TestTimeout(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithTimeout(Background(), 1*timeUnit) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 2*timeUnit, t) - - c, _ = WithTimeout(Background(), 1*timeUnit) - o := otherContext{c} - testDeadline(o, 2*timeUnit, t) - - c, _ = WithTimeout(Background(), 1*timeUnit) - o = otherContext{c} - c, _ = WithTimeout(o, 3*timeUnit) - testDeadline(c, 2*timeUnit, t) -} - -func TestCanceledTimeout(t *testing.T) { - t.Parallel() - const timeUnit = 500 * time.Millisecond - c, _ := WithTimeout(Background(), 2*timeUnit) - o := otherContext{c} - c, cancel := WithTimeout(o, 4*timeUnit) - cancel() - time.Sleep(1 * timeUnit) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := 
otherContext{WithValue(Background(), k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") - - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 16, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TODO(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. - for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + 100*time.Millisecond): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} - -func TestCancelRemoves(t *testing.T) { - checkChildren := func(when string, ctx Context, want int) { - if got := len(ctx.(*cancelCtx).children); got != want { - t.Errorf("%s: context has %d children, want %d", when, got, want) - } - } - - ctx, _ := WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel := WithCancel(ctx) - checkChildren("with WithCancel child ", ctx, 1) - cancel() - checkChildren("after cancelling WithCancel child", ctx, 0) - - ctx, _ = WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel = WithTimeout(ctx, 60*time.Minute) - checkChildren("with WithTimeout child ", ctx, 1) - cancel() - checkChildren("after cancelling WithTimeout child", ctx, 0) -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 606cf1f9..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. 
- if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go deleted file mode 100644 index 72411b1b..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,go1.7 - -package ctxhttp - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "context" -) - -func TestGo17Context(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - })) - defer ts.Close() - ctx := context.Background() - resp, err := Get(ctx, http.DefaultClient, ts.URL) - if resp == nil || err != nil { - t.Fatalf("error received from client: %v %v", err, resp) - } - resp.Body.Close() -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go deleted file mode 100644 index 926870cc..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // TODO(djd): Respect any existing value of req.Cancel. 
- cancel := make(chan struct{}) - req.Cancel = cancel - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - // Make local copies of test hooks closed over by goroutines below. - // Prevents data races in tests. - testHookDoReturned := testHookDoReturned - testHookDidBodyClose := testHookDidBodyClose - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - close(cancel) - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - close(cancel) - case <-c: - // The response's Body is closed. - } - }() - resp.Body = &notifyingReader{resp.Body, c} - - return resp, nil -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go deleted file mode 100644 index 9159cf02..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -// +build !plan9,!go1.7 - -package ctxhttp - -import ( - "net" - "net/http" - "net/http/httptest" - "sync" - "testing" - "time" - - "golang.org/x/net/context" -) - -// golang.org/issue/14065 -func TestClosesResponseBodyOnCancel(t *testing.T) { - defer func() { testHookContextDoneBeforeHeaders = nop }() - defer func() { testHookDoReturned = nop }() - defer func() { testHookDidBodyClose = nop }() - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) - defer ts.Close() - - ctx, cancel := context.WithCancel(context.Background()) - - // closed when Do enters select case <-ctx.Done() - enteredDonePath := make(chan struct{}) - - testHookContextDoneBeforeHeaders = func() { - close(enteredDonePath) - } - - testHookDoReturned = func() { - // We now have the result (the Flush'd headers) at least, - // so we can cancel the request. - cancel() - - // But block the client.Do goroutine from sending - // until Do enters into the <-ctx.Done() path, since - // otherwise if both channels are readable, select - // picks a random one. - <-enteredDonePath - } - - sawBodyClose := make(chan struct{}) - testHookDidBodyClose = func() { close(sawBodyClose) } - - tr := &http.Transport{} - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - req, _ := http.NewRequest("GET", ts.URL, nil) - _, doErr := Do(ctx, c, req) - - select { - case <-sawBodyClose: - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for body to close") - } - - if doErr != ctx.Err() { - t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) - } -} - -type noteCloseConn struct { - net.Conn - onceClose sync.Once - closefn func() -} - -func (c *noteCloseConn) Close() error { - c.onceClose.Do(c.closefn) - return c.Conn.Close() -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go deleted file mode 100644 index 1e415518..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !plan9 - -package ctxhttp - -import ( - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "golang.org/x/net/context" -) - -const ( - requestDuration = 100 * time.Millisecond - requestBody = "ok" -) - -func okHandler(w http.ResponseWriter, r *http.Request) { - time.Sleep(requestDuration) - io.WriteString(w, requestBody) -} - -func TestNoTimeout(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(okHandler)) - defer ts.Close() - - ctx := context.Background() - res, err := Get(ctx, nil, ts.URL) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(slurp) != requestBody { - t.Errorf("body = %q; want %q", slurp, requestBody) - } -} - -func TestCancelBeforeHeaders(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - blockServer := make(chan struct{}) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - cancel() - <-blockServer - io.WriteString(w, requestBody) - })) - defer ts.Close() - defer close(blockServer) - - res, err := Get(ctx, nil, ts.URL) - if err == nil { - res.Body.Close() - t.Fatal("Get returned unexpected nil error") - } - if err != context.Canceled { - t.Errorf("err = %v; want %v", err, context.Canceled) - } -} - -func TestCancelAfterHangingRequest(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.(http.Flusher).Flush() - <-w.(http.CloseNotifier).CloseNotify() - })) - defer ts.Close() - - ctx, cancel := context.WithCancel(context.Background()) - resp, err := Get(ctx, nil, ts.URL) - if err != nil { - t.Fatalf("unexpected error in Get: %v", err) - } - - // Cancel befer reading the body. - // Reading Request.Body should fail, since the request was - // canceled before anything was written. - cancel() - - done := make(chan struct{}) - - go func() { - b, err := ioutil.ReadAll(resp.Body) - if len(b) != 0 || err == nil { - t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) - } - close(done) - }() - - select { - case <-time.After(1 * time.Second): - t.Errorf("Test timed out") - case <-done: - } -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index d20f52b7..00000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index d88bd1db..00000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 0f35592d..00000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index b105f80b..00000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. 
- // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index e6f56691..00000000 --- a/vendor/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -// This example passes a context with a timeout to tell a blocking function that -// it should abandon its work after the timeout elapses. -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. - ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) - defer cancel() - - select { - case <-time.After(1 * time.Second): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - - // Output: - // context deadline exceeded -} diff --git a/vendor/golang.org/x/net/dict/dict.go b/vendor/golang.org/x/net/dict/dict.go deleted file mode 100644 index 93e65c03..00000000 --- a/vendor/golang.org/x/net/dict/dict.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dict implements the Dictionary Server Protocol -// as defined in RFC 2229. -package dict // import "golang.org/x/net/dict" - -import ( - "net/textproto" - "strconv" - "strings" -) - -// A Client represents a client connection to a dictionary server. -type Client struct { - text *textproto.Conn -} - -// Dial returns a new client connected to a dictionary server at -// addr on the given network. -func Dial(network, addr string) (*Client, error) { - text, err := textproto.Dial(network, addr) - if err != nil { - return nil, err - } - _, _, err = text.ReadCodeLine(220) - if err != nil { - text.Close() - return nil, err - } - return &Client{text: text}, nil -} - -// Close closes the connection to the dictionary server. -func (c *Client) Close() error { - return c.text.Close() -} - -// A Dict represents a dictionary available on the server. -type Dict struct { - Name string // short name of dictionary - Desc string // long description -} - -// Dicts returns a list of the dictionaries available on the server. -func (c *Client) Dicts() ([]Dict, error) { - id, err := c.text.Cmd("SHOW DB") - if err != nil { - return nil, err - } - - c.text.StartResponse(id) - defer c.text.EndResponse(id) - - _, _, err = c.text.ReadCodeLine(110) - if err != nil { - return nil, err - } - lines, err := c.text.ReadDotLines() - if err != nil { - return nil, err - } - _, _, err = c.text.ReadCodeLine(250) - - dicts := make([]Dict, len(lines)) - for i := range dicts { - d := &dicts[i] - a, _ := fields(lines[i]) - if len(a) < 2 { - return nil, textproto.ProtocolError("invalid dictionary: " + lines[i]) - } - d.Name = a[0] - d.Desc = a[1] - } - return dicts, err -} - -// A Defn represents a definition. -type Defn struct { - Dict Dict // Dict where definition was found - Word string // Word being defined - Text []byte // Definition text, typically multiple lines -} - -// Define requests the definition of the given word. -// The argument dict names the dictionary to use, -// the Name field of a Dict returned by Dicts. -// -// The special dictionary name "*" means to look in all the -// server's dictionaries. -// The special dictionary name "!" means to look in all the -// server's dictionaries in turn, stopping after finding the word -// in one of them. -func (c *Client) Define(dict, word string) ([]*Defn, error) { - id, err := c.text.Cmd("DEFINE %s %q", dict, word) - if err != nil { - return nil, err - } - - c.text.StartResponse(id) - defer c.text.EndResponse(id) - - _, line, err := c.text.ReadCodeLine(150) - if err != nil { - return nil, err - } - a, _ := fields(line) - if len(a) < 1 { - return nil, textproto.ProtocolError("malformed response: " + line) - } - n, err := strconv.Atoi(a[0]) - if err != nil { - return nil, textproto.ProtocolError("invalid definition count: " + a[0]) - } - def := make([]*Defn, n) - for i := 0; i < n; i++ { - _, line, err = c.text.ReadCodeLine(151) - if err != nil { - return nil, err - } - a, _ := fields(line) - if len(a) < 3 { - // skip it, to keep protocol in sync - i-- - n-- - def = def[0:n] - continue - } - d := &Defn{Word: a[0], Dict: Dict{a[1], a[2]}} - d.Text, err = c.text.ReadDotBytes() - if err != nil { - return nil, err - } - def[i] = d - } - _, _, err = c.text.ReadCodeLine(250) - return def, err -} - -// Fields returns the fields in s. -// Fields are space separated unquoted words -// or quoted with single or double quote. 
-func fields(s string) ([]string, error) { - var v []string - i := 0 - for { - for i < len(s) && (s[i] == ' ' || s[i] == '\t') { - i++ - } - if i >= len(s) { - break - } - if s[i] == '"' || s[i] == '\'' { - q := s[i] - // quoted string - var j int - for j = i + 1; ; j++ { - if j >= len(s) { - return nil, textproto.ProtocolError("malformed quoted string") - } - if s[j] == '\\' { - j++ - continue - } - if s[j] == q { - j++ - break - } - } - v = append(v, unquote(s[i+1:j-1])) - i = j - } else { - // atom - var j int - for j = i; j < len(s); j++ { - if s[j] == ' ' || s[j] == '\t' || s[j] == '\\' || s[j] == '"' || s[j] == '\'' { - break - } - } - v = append(v, s[i:j]) - i = j - } - if i < len(s) { - c := s[i] - if c != ' ' && c != '\t' { - return nil, textproto.ProtocolError("quotes not on word boundaries") - } - } - } - return v, nil -} - -func unquote(s string) string { - if strings.Index(s, "\\") < 0 { - return s - } - b := []byte(s) - w := 0 - for r := 0; r < len(b); r++ { - c := b[r] - if c == '\\' { - r++ - c = b[r] - } - b[w] = c - w++ - } - return string(b[0:w]) -} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go deleted file mode 100644 index 5415c2d3..00000000 --- a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package dnsmessage_test - -import ( - "fmt" - "net" - "strings" - - "golang.org/x/net/dns/dnsmessage" -) - -func mustNewName(name string) dnsmessage.Name { - n, err := dnsmessage.NewName(name) - if err != nil { - panic(err) - } - return n -} - -func ExampleParser() { - msg := dnsmessage.Message{ - Header: dnsmessage.Header{Response: true, Authoritative: true}, - Questions: []dnsmessage.Question{ - { - Name: mustNewName("foo.bar.example.com."), - Type: dnsmessage.TypeA, - Class: dnsmessage.ClassINET, - }, - { - Name: mustNewName("bar.example.com."), - Type: dnsmessage.TypeA, - Class: dnsmessage.ClassINET, - }, - }, - Answers: []dnsmessage.Resource{ - { - dnsmessage.ResourceHeader{ - Name: mustNewName("foo.bar.example.com."), - Type: dnsmessage.TypeA, - Class: dnsmessage.ClassINET, - }, - &dnsmessage.AResource{[4]byte{127, 0, 0, 1}}, - }, - { - dnsmessage.ResourceHeader{ - Name: mustNewName("bar.example.com."), - Type: dnsmessage.TypeA, - Class: dnsmessage.ClassINET, - }, - &dnsmessage.AResource{[4]byte{127, 0, 0, 2}}, - }, - }, - } - - buf, err := msg.Pack() - if err != nil { - panic(err) - } - - wantName := "bar.example.com." 
- - var p dnsmessage.Parser - if _, err := p.Start(buf); err != nil { - panic(err) - } - - for { - q, err := p.Question() - if err == dnsmessage.ErrSectionDone { - break - } - if err != nil { - panic(err) - } - - if q.Name.String() != wantName { - continue - } - - fmt.Println("Found question for name", wantName) - if err := p.SkipAllQuestions(); err != nil { - panic(err) - } - break - } - - var gotIPs []net.IP - for { - h, err := p.AnswerHeader() - if err == dnsmessage.ErrSectionDone { - break - } - if err != nil { - panic(err) - } - - if (h.Type != dnsmessage.TypeA && h.Type != dnsmessage.TypeAAAA) || h.Class != dnsmessage.ClassINET { - continue - } - - if !strings.EqualFold(h.Name.String(), wantName) { - if err := p.SkipAnswer(); err != nil { - panic(err) - } - continue - } - - switch h.Type { - case dnsmessage.TypeA: - r, err := p.AResource() - if err != nil { - panic(err) - } - gotIPs = append(gotIPs, r.A[:]) - case dnsmessage.TypeAAAA: - r, err := p.AAAAResource() - if err != nil { - panic(err) - } - gotIPs = append(gotIPs, r.AAAA[:]) - } - } - - fmt.Printf("Found A/AAAA records for name %s: %v\n", wantName, gotIPs) - - // Output: - // Found question for name bar.example.com. - // Found A/AAAA records for name bar.example.com.: [127.0.0.2] -} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go deleted file mode 100644 index 19b260de..00000000 --- a/vendor/golang.org/x/net/dns/dnsmessage/message.go +++ /dev/null @@ -1,1997 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dnsmessage provides a mostly RFC 1035 compliant implementation of -// DNS message packing and unpacking. -// -// This implementation is designed to minimize heap allocations and avoid -// unnecessary packing and unpacking as much as possible. -package dnsmessage - -import ( - "errors" -) - -// Packet formats - -// A Type is a type of DNS request and response. -type Type uint16 - -// A Class is a type of network. -type Class uint16 - -// An OpCode is a DNS operation code. -type OpCode uint16 - -// An RCode is a DNS response status code. -type RCode uint16 - -// Wire constants. -const ( - // ResourceHeader.Type and Question.Type - TypeA Type = 1 - TypeNS Type = 2 - TypeCNAME Type = 5 - TypeSOA Type = 6 - TypePTR Type = 12 - TypeMX Type = 15 - TypeTXT Type = 16 - TypeAAAA Type = 28 - TypeSRV Type = 33 - - // Question.Type - TypeWKS Type = 11 - TypeHINFO Type = 13 - TypeMINFO Type = 14 - TypeAXFR Type = 252 - TypeALL Type = 255 - - // ResourceHeader.Class and Question.Class - ClassINET Class = 1 - ClassCSNET Class = 2 - ClassCHAOS Class = 3 - ClassHESIOD Class = 4 - - // Question.Class - ClassANY Class = 255 - - // Message.Rcode - RCodeSuccess RCode = 0 - RCodeFormatError RCode = 1 - RCodeServerFailure RCode = 2 - RCodeNameError RCode = 3 - RCodeNotImplemented RCode = 4 - RCodeRefused RCode = 5 -) - -var ( - // ErrNotStarted indicates that the prerequisite information isn't - // available yet because the previous records haven't been appropriately - // parsed, skipped or finished. - ErrNotStarted = errors.New("parsing/packing of this type isn't available yet") - - // ErrSectionDone indicated that all records in the section have been - // parsed or finished. 
- ErrSectionDone = errors.New("parsing/packing of this section has completed") - - errBaseLen = errors.New("insufficient data for base length type") - errCalcLen = errors.New("insufficient data for calculated length type") - errReserved = errors.New("segment prefix is reserved") - errTooManyPtr = errors.New("too many pointers (>10)") - errInvalidPtr = errors.New("invalid pointer") - errNilResouceBody = errors.New("nil resource body") - errResourceLen = errors.New("insufficient data for resource body length") - errSegTooLong = errors.New("segment length too long") - errZeroSegLen = errors.New("zero length segment") - errResTooLong = errors.New("resource length too long") - errTooManyQuestions = errors.New("too many Questions to pack (>65535)") - errTooManyAnswers = errors.New("too many Answers to pack (>65535)") - errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)") - errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)") - errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)") -) - -// Internal constants. -const ( - // packStartingCap is the default initial buffer size allocated during - // packing. - // - // The starting capacity doesn't matter too much, but most DNS responses - // Will be <= 512 bytes as it is the limit for DNS over UDP. - packStartingCap = 512 - - // uint16Len is the length (in bytes) of a uint16. - uint16Len = 2 - - // uint32Len is the length (in bytes) of a uint32. - uint32Len = 4 - - // headerLen is the length (in bytes) of a DNS header. - // - // A header is comprised of 6 uint16s and no padding. - headerLen = 6 * uint16Len -) - -type nestedError struct { - // s is the current level's error message. - s string - - // err is the nested error. - err error -} - -// nestedError implements error.Error. -func (e *nestedError) Error() string { - return e.s + ": " + e.err.Error() -} - -// Header is a representation of a DNS message header. -type Header struct { - ID uint16 - Response bool - OpCode OpCode - Authoritative bool - Truncated bool - RecursionDesired bool - RecursionAvailable bool - RCode RCode -} - -func (m *Header) pack() (id uint16, bits uint16) { - id = m.ID - bits = uint16(m.OpCode)<<11 | uint16(m.RCode) - if m.RecursionAvailable { - bits |= headerBitRA - } - if m.RecursionDesired { - bits |= headerBitRD - } - if m.Truncated { - bits |= headerBitTC - } - if m.Authoritative { - bits |= headerBitAA - } - if m.Response { - bits |= headerBitQR - } - return -} - -// Message is a representation of a DNS message. -type Message struct { - Header - Questions []Question - Answers []Resource - Authorities []Resource - Additionals []Resource -} - -type section uint8 - -const ( - sectionNotStarted section = iota - sectionHeader - sectionQuestions - sectionAnswers - sectionAuthorities - sectionAdditionals - sectionDone - - headerBitQR = 1 << 15 // query/response (response=1) - headerBitAA = 1 << 10 // authoritative - headerBitTC = 1 << 9 // truncated - headerBitRD = 1 << 8 // recursion desired - headerBitRA = 1 << 7 // recursion available -) - -var sectionNames = map[section]string{ - sectionHeader: "header", - sectionQuestions: "Question", - sectionAnswers: "Answer", - sectionAuthorities: "Authority", - sectionAdditionals: "Additional", -} - -// header is the wire format for a DNS message header. 
-type header struct { - id uint16 - bits uint16 - questions uint16 - answers uint16 - authorities uint16 - additionals uint16 -} - -func (h *header) count(sec section) uint16 { - switch sec { - case sectionQuestions: - return h.questions - case sectionAnswers: - return h.answers - case sectionAuthorities: - return h.authorities - case sectionAdditionals: - return h.additionals - } - return 0 -} - -func (h *header) pack(msg []byte) []byte { - msg = packUint16(msg, h.id) - msg = packUint16(msg, h.bits) - msg = packUint16(msg, h.questions) - msg = packUint16(msg, h.answers) - msg = packUint16(msg, h.authorities) - return packUint16(msg, h.additionals) -} - -func (h *header) unpack(msg []byte, off int) (int, error) { - newOff := off - var err error - if h.id, newOff, err = unpackUint16(msg, newOff); err != nil { - return off, &nestedError{"id", err} - } - if h.bits, newOff, err = unpackUint16(msg, newOff); err != nil { - return off, &nestedError{"bits", err} - } - if h.questions, newOff, err = unpackUint16(msg, newOff); err != nil { - return off, &nestedError{"questions", err} - } - if h.answers, newOff, err = unpackUint16(msg, newOff); err != nil { - return off, &nestedError{"answers", err} - } - if h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil { - return off, &nestedError{"authorities", err} - } - if h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil { - return off, &nestedError{"additionals", err} - } - return newOff, nil -} - -func (h *header) header() Header { - return Header{ - ID: h.id, - Response: (h.bits & headerBitQR) != 0, - OpCode: OpCode(h.bits>>11) & 0xF, - Authoritative: (h.bits & headerBitAA) != 0, - Truncated: (h.bits & headerBitTC) != 0, - RecursionDesired: (h.bits & headerBitRD) != 0, - RecursionAvailable: (h.bits & headerBitRA) != 0, - RCode: RCode(h.bits & 0xF), - } -} - -// A Resource is a DNS resource record. -type Resource struct { - Header ResourceHeader - Body ResourceBody -} - -// A ResourceBody is a DNS resource record minus the header. -type ResourceBody interface { - // pack packs a Resource except for its header. - pack(msg []byte, compression map[string]int) ([]byte, error) - - // realType returns the actual type of the Resource. This is used to - // fill in the header Type field. - realType() Type -} - -func (r *Resource) pack(msg []byte, compression map[string]int) ([]byte, error) { - if r.Body == nil { - return msg, errNilResouceBody - } - oldMsg := msg - r.Header.Type = r.Body.realType() - msg, length, err := r.Header.pack(msg, compression) - if err != nil { - return msg, &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - msg, err = r.Body.pack(msg, compression) - if err != nil { - return msg, &nestedError{"content", err} - } - if err := r.Header.fixLen(msg, length, preLen); err != nil { - return oldMsg, err - } - return msg, nil -} - -// A Parser allows incrementally parsing a DNS message. -// -// When parsing is started, the Header is parsed. Next, each Question can be -// either parsed or skipped. Alternatively, all Questions can be skipped at -// once. When all Questions have been parsed, attempting to parse Questions -// will return (nil, nil) and attempting to skip Questions will return -// (true, nil). After all Questions have been either parsed or skipped, all -// Answers, Authorities and Additionals can be either parsed or skipped in the -// same way, and each type of Resource must be fully parsed or skipped before -// proceeding to the next type of Resource. 
-// -// Note that there is no requirement to fully skip or parse the message. -type Parser struct { - msg []byte - header header - - section section - off int - index int - resHeaderValid bool - resHeader ResourceHeader -} - -// Start parses the header and enables the parsing of Questions. -func (p *Parser) Start(msg []byte) (Header, error) { - if p.msg != nil { - *p = Parser{} - } - p.msg = msg - var err error - if p.off, err = p.header.unpack(msg, 0); err != nil { - return Header{}, &nestedError{"unpacking header", err} - } - p.section = sectionQuestions - return p.header.header(), nil -} - -func (p *Parser) checkAdvance(sec section) error { - if p.section < sec { - return ErrNotStarted - } - if p.section > sec { - return ErrSectionDone - } - p.resHeaderValid = false - if p.index == int(p.header.count(sec)) { - p.index = 0 - p.section++ - return ErrSectionDone - } - return nil -} - -func (p *Parser) resource(sec section) (Resource, error) { - var r Resource - var err error - r.Header, err = p.resourceHeader(sec) - if err != nil { - return r, err - } - p.resHeaderValid = false - r.Body, p.off, err = unpackResourceBody(p.msg, p.off, r.Header) - if err != nil { - return Resource{}, &nestedError{"unpacking " + sectionNames[sec], err} - } - p.index++ - return r, nil -} - -func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) { - if p.resHeaderValid { - return p.resHeader, nil - } - if err := p.checkAdvance(sec); err != nil { - return ResourceHeader{}, err - } - var hdr ResourceHeader - off, err := hdr.unpack(p.msg, p.off) - if err != nil { - return ResourceHeader{}, err - } - p.resHeaderValid = true - p.resHeader = hdr - p.off = off - return hdr, nil -} - -func (p *Parser) skipResource(sec section) error { - if p.resHeaderValid { - newOff := p.off + int(p.resHeader.Length) - if newOff > len(p.msg) { - return errResourceLen - } - p.off = newOff - p.resHeaderValid = false - p.index++ - return nil - } - if err := p.checkAdvance(sec); err != nil { - return err - } - var err error - p.off, err = skipResource(p.msg, p.off) - if err != nil { - return &nestedError{"skipping: " + sectionNames[sec], err} - } - p.index++ - return nil -} - -// Question parses a single Question. -func (p *Parser) Question() (Question, error) { - if err := p.checkAdvance(sectionQuestions); err != nil { - return Question{}, err - } - var name Name - off, err := name.unpack(p.msg, p.off) - if err != nil { - return Question{}, &nestedError{"unpacking Question.Name", err} - } - typ, off, err := unpackType(p.msg, off) - if err != nil { - return Question{}, &nestedError{"unpacking Question.Type", err} - } - class, off, err := unpackClass(p.msg, off) - if err != nil { - return Question{}, &nestedError{"unpacking Question.Class", err} - } - p.off = off - p.index++ - return Question{name, typ, class}, nil -} - -// AllQuestions parses all Questions. -func (p *Parser) AllQuestions() ([]Question, error) { - qs := make([]Question, 0, p.header.questions) - for { - q, err := p.Question() - if err == ErrSectionDone { - return qs, nil - } - if err != nil { - return nil, err - } - qs = append(qs, q) - } -} - -// SkipQuestion skips a single Question. 
-func (p *Parser) SkipQuestion() error { - if err := p.checkAdvance(sectionQuestions); err != nil { - return err - } - off, err := skipName(p.msg, p.off) - if err != nil { - return &nestedError{"skipping Question Name", err} - } - if off, err = skipType(p.msg, off); err != nil { - return &nestedError{"skipping Question Type", err} - } - if off, err = skipClass(p.msg, off); err != nil { - return &nestedError{"skipping Question Class", err} - } - p.off = off - p.index++ - return nil -} - -// SkipAllQuestions skips all Questions. -func (p *Parser) SkipAllQuestions() error { - for { - if err := p.SkipQuestion(); err == ErrSectionDone { - return nil - } else if err != nil { - return err - } - } -} - -// AnswerHeader parses a single Answer ResourceHeader. -func (p *Parser) AnswerHeader() (ResourceHeader, error) { - return p.resourceHeader(sectionAnswers) -} - -// Answer parses a single Answer Resource. -func (p *Parser) Answer() (Resource, error) { - return p.resource(sectionAnswers) -} - -// AllAnswers parses all Answer Resources. -func (p *Parser) AllAnswers() ([]Resource, error) { - as := make([]Resource, 0, p.header.answers) - for { - a, err := p.Answer() - if err == ErrSectionDone { - return as, nil - } - if err != nil { - return nil, err - } - as = append(as, a) - } -} - -// SkipAnswer skips a single Answer Resource. -func (p *Parser) SkipAnswer() error { - return p.skipResource(sectionAnswers) -} - -// SkipAllAnswers skips all Answer Resources. -func (p *Parser) SkipAllAnswers() error { - for { - if err := p.SkipAnswer(); err == ErrSectionDone { - return nil - } else if err != nil { - return err - } - } -} - -// AuthorityHeader parses a single Authority ResourceHeader. -func (p *Parser) AuthorityHeader() (ResourceHeader, error) { - return p.resourceHeader(sectionAuthorities) -} - -// Authority parses a single Authority Resource. -func (p *Parser) Authority() (Resource, error) { - return p.resource(sectionAuthorities) -} - -// AllAuthorities parses all Authority Resources. -func (p *Parser) AllAuthorities() ([]Resource, error) { - as := make([]Resource, 0, p.header.authorities) - for { - a, err := p.Authority() - if err == ErrSectionDone { - return as, nil - } - if err != nil { - return nil, err - } - as = append(as, a) - } -} - -// SkipAuthority skips a single Authority Resource. -func (p *Parser) SkipAuthority() error { - return p.skipResource(sectionAuthorities) -} - -// SkipAllAuthorities skips all Authority Resources. -func (p *Parser) SkipAllAuthorities() error { - for { - if err := p.SkipAuthority(); err == ErrSectionDone { - return nil - } else if err != nil { - return err - } - } -} - -// AdditionalHeader parses a single Additional ResourceHeader. -func (p *Parser) AdditionalHeader() (ResourceHeader, error) { - return p.resourceHeader(sectionAdditionals) -} - -// Additional parses a single Additional Resource. -func (p *Parser) Additional() (Resource, error) { - return p.resource(sectionAdditionals) -} - -// AllAdditionals parses all Additional Resources. -func (p *Parser) AllAdditionals() ([]Resource, error) { - as := make([]Resource, 0, p.header.additionals) - for { - a, err := p.Additional() - if err == ErrSectionDone { - return as, nil - } - if err != nil { - return nil, err - } - as = append(as, a) - } -} - -// SkipAdditional skips a single Additional Resource. -func (p *Parser) SkipAdditional() error { - return p.skipResource(sectionAdditionals) -} - -// SkipAllAdditionals skips all Additional Resources. 
-func (p *Parser) SkipAllAdditionals() error { - for { - if err := p.SkipAdditional(); err == ErrSectionDone { - return nil - } else if err != nil { - return err - } - } -} - -// CNAMEResource parses a single CNAMEResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) CNAMEResource() (CNAMEResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeCNAME { - return CNAMEResource{}, ErrNotStarted - } - r, err := unpackCNAMEResource(p.msg, p.off) - if err != nil { - return CNAMEResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// MXResource parses a single MXResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) MXResource() (MXResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeMX { - return MXResource{}, ErrNotStarted - } - r, err := unpackMXResource(p.msg, p.off) - if err != nil { - return MXResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// NSResource parses a single NSResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) NSResource() (NSResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeNS { - return NSResource{}, ErrNotStarted - } - r, err := unpackNSResource(p.msg, p.off) - if err != nil { - return NSResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// PTRResource parses a single PTRResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) PTRResource() (PTRResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypePTR { - return PTRResource{}, ErrNotStarted - } - r, err := unpackPTRResource(p.msg, p.off) - if err != nil { - return PTRResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// SOAResource parses a single SOAResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) SOAResource() (SOAResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeSOA { - return SOAResource{}, ErrNotStarted - } - r, err := unpackSOAResource(p.msg, p.off) - if err != nil { - return SOAResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// TXTResource parses a single TXTResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) TXTResource() (TXTResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeTXT { - return TXTResource{}, ErrNotStarted - } - r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length) - if err != nil { - return TXTResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// SRVResource parses a single SRVResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. 
-func (p *Parser) SRVResource() (SRVResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeSRV { - return SRVResource{}, ErrNotStarted - } - r, err := unpackSRVResource(p.msg, p.off) - if err != nil { - return SRVResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// AResource parses a single AResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) AResource() (AResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeA { - return AResource{}, ErrNotStarted - } - r, err := unpackAResource(p.msg, p.off) - if err != nil { - return AResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// AAAAResource parses a single AAAAResource. -// -// One of the XXXHeader methods must have been called before calling this -// method. -func (p *Parser) AAAAResource() (AAAAResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeAAAA { - return AAAAResource{}, ErrNotStarted - } - r, err := unpackAAAAResource(p.msg, p.off) - if err != nil { - return AAAAResource{}, err - } - p.off += int(p.resHeader.Length) - p.resHeaderValid = false - p.index++ - return r, nil -} - -// Unpack parses a full Message. -func (m *Message) Unpack(msg []byte) error { - var p Parser - var err error - if m.Header, err = p.Start(msg); err != nil { - return err - } - if m.Questions, err = p.AllQuestions(); err != nil { - return err - } - if m.Answers, err = p.AllAnswers(); err != nil { - return err - } - if m.Authorities, err = p.AllAuthorities(); err != nil { - return err - } - if m.Additionals, err = p.AllAdditionals(); err != nil { - return err - } - return nil -} - -// Pack packs a full Message. -func (m *Message) Pack() ([]byte, error) { - // Validate the lengths. It is very unlikely that anyone will try to - // pack more than 65535 of any particular type, but it is possible and - // we should fail gracefully. - if len(m.Questions) > int(^uint16(0)) { - return nil, errTooManyQuestions - } - if len(m.Answers) > int(^uint16(0)) { - return nil, errTooManyAnswers - } - if len(m.Authorities) > int(^uint16(0)) { - return nil, errTooManyAuthorities - } - if len(m.Additionals) > int(^uint16(0)) { - return nil, errTooManyAdditionals - } - - var h header - h.id, h.bits = m.Header.pack() - - h.questions = uint16(len(m.Questions)) - h.answers = uint16(len(m.Answers)) - h.authorities = uint16(len(m.Authorities)) - h.additionals = uint16(len(m.Additionals)) - - msg := make([]byte, 0, packStartingCap) - - msg = h.pack(msg) - - // RFC 1035 allows (but does not require) compression for packing. RFC - // 1035 requires unpacking implementations to support compression, so - // unconditionally enabling it is fine. - // - // DNS lookups are typically done over UDP, and RFC 1035 states that UDP - // DNS packets can be a maximum of 512 bytes long. Without compression, - // many DNS response packets are over this limit, so enabling - // compression will help ensure compliance. 
- compression := map[string]int{} - - for i := range m.Questions { - var err error - if msg, err = m.Questions[i].pack(msg, compression); err != nil { - return nil, &nestedError{"packing Question", err} - } - } - for i := range m.Answers { - var err error - if msg, err = m.Answers[i].pack(msg, compression); err != nil { - return nil, &nestedError{"packing Answer", err} - } - } - for i := range m.Authorities { - var err error - if msg, err = m.Authorities[i].pack(msg, compression); err != nil { - return nil, &nestedError{"packing Authority", err} - } - } - for i := range m.Additionals { - var err error - if msg, err = m.Additionals[i].pack(msg, compression); err != nil { - return nil, &nestedError{"packing Additional", err} - } - } - - return msg, nil -} - -// A Builder allows incrementally packing a DNS message. -type Builder struct { - msg []byte - header header - section section - compression map[string]int -} - -// Start initializes the builder. -// -// buf is optional (nil is fine), but if provided, Start takes ownership of buf. -func (b *Builder) Start(buf []byte, h Header) { - b.StartWithoutCompression(buf, h) - b.compression = map[string]int{} -} - -// StartWithoutCompression initializes the builder with compression disabled. -// -// This avoids compression related allocations, but can result in larger message -// sizes. Be careful with this mode as it can cause messages to exceed the UDP -// size limit. -// -// buf is optional (nil is fine), but if provided, Start takes ownership of buf. -func (b *Builder) StartWithoutCompression(buf []byte, h Header) { - *b = Builder{msg: buf} - b.header.id, b.header.bits = h.pack() - if cap(b.msg) < headerLen { - b.msg = make([]byte, 0, packStartingCap) - } - b.msg = b.msg[:headerLen] - b.section = sectionHeader -} - -func (b *Builder) startCheck(s section) error { - if b.section <= sectionNotStarted { - return ErrNotStarted - } - if b.section > s { - return ErrSectionDone - } - return nil -} - -// StartQuestions prepares the builder for packing Questions. -func (b *Builder) StartQuestions() error { - if err := b.startCheck(sectionQuestions); err != nil { - return err - } - b.section = sectionQuestions - return nil -} - -// StartAnswers prepares the builder for packing Answers. -func (b *Builder) StartAnswers() error { - if err := b.startCheck(sectionAnswers); err != nil { - return err - } - b.section = sectionAnswers - return nil -} - -// StartAuthorities prepares the builder for packing Authorities. -func (b *Builder) StartAuthorities() error { - if err := b.startCheck(sectionAuthorities); err != nil { - return err - } - b.section = sectionAuthorities - return nil -} - -// StartAdditionals prepares the builder for packing Additionals. -func (b *Builder) StartAdditionals() error { - if err := b.startCheck(sectionAdditionals); err != nil { - return err - } - b.section = sectionAdditionals - return nil -} - -func (b *Builder) incrementSectionCount() error { - var count *uint16 - var err error - switch b.section { - case sectionQuestions: - count = &b.header.questions - err = errTooManyQuestions - case sectionAnswers: - count = &b.header.answers - err = errTooManyAnswers - case sectionAuthorities: - count = &b.header.authorities - err = errTooManyAuthorities - case sectionAdditionals: - count = &b.header.additionals - err = errTooManyAdditionals - } - if *count == ^uint16(0) { - return err - } - *count++ - return nil -} - -// Question adds a single Question. 
-func (b *Builder) Question(q Question) error { - if b.section < sectionQuestions { - return ErrNotStarted - } - if b.section > sectionQuestions { - return ErrSectionDone - } - msg, err := q.pack(b.msg, b.compression) - if err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -func (b *Builder) checkResourceSection() error { - if b.section < sectionAnswers { - return ErrNotStarted - } - if b.section > sectionAdditionals { - return ErrSectionDone - } - return nil -} - -// CNAMEResource adds a single CNAMEResource. -func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"CNAMEResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// MXResource adds a single MXResource. -func (b *Builder) MXResource(h ResourceHeader, r MXResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"MXResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// NSResource adds a single NSResource. -func (b *Builder) NSResource(h ResourceHeader, r NSResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"NSResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// PTRResource adds a single PTRResource. -func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"PTRResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// SOAResource adds a single SOAResource. 
-func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"SOAResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// TXTResource adds a single TXTResource. -func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"TXTResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// SRVResource adds a single SRVResource. -func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"SRVResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// AResource adds a single AResource. -func (b *Builder) AResource(h ResourceHeader, r AResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"AResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// AAAAResource adds a single AAAAResource. -func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error { - if err := b.checkResourceSection(); err != nil { - return err - } - h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) - if err != nil { - return &nestedError{"ResourceHeader", err} - } - preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { - return &nestedError{"AAAAResource body", err} - } - if err := h.fixLen(msg, length, preLen); err != nil { - return err - } - if err := b.incrementSectionCount(); err != nil { - return err - } - b.msg = msg - return nil -} - -// Finish ends message building and generates a binary packet. -func (b *Builder) Finish() ([]byte, error) { - if b.section < sectionHeader { - return nil, ErrNotStarted - } - b.section = sectionDone - b.header.pack(b.msg[:0]) - return b.msg, nil -} - -// A ResourceHeader is the header of a DNS resource record. There are -// many types of DNS resource records, but they all share the same header. 
-type ResourceHeader struct { - // Name is the domain name for which this resource record pertains. - Name Name - - // Type is the type of DNS resource record. - // - // This field will be set automatically during packing. - Type Type - - // Class is the class of network to which this DNS resource record - // pertains. - Class Class - - // TTL is the length of time (measured in seconds) which this resource - // record is valid for (time to live). All Resources in a set should - // have the same TTL (RFC 2181 Section 5.2). - TTL uint32 - - // Length is the length of data in the resource record after the header. - // - // This field will be set automatically during packing. - Length uint16 -} - -// pack packs all of the fields in a ResourceHeader except for the length. The -// length bytes are returned as a slice so they can be filled in after the rest -// of the Resource has been packed. -func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int) (msg []byte, length []byte, err error) { - msg = oldMsg - if msg, err = h.Name.pack(msg, compression); err != nil { - return oldMsg, nil, &nestedError{"Name", err} - } - msg = packType(msg, h.Type) - msg = packClass(msg, h.Class) - msg = packUint32(msg, h.TTL) - lenBegin := len(msg) - msg = packUint16(msg, h.Length) - return msg, msg[lenBegin : lenBegin+uint16Len], nil -} - -func (h *ResourceHeader) unpack(msg []byte, off int) (int, error) { - newOff := off - var err error - if newOff, err = h.Name.unpack(msg, newOff); err != nil { - return off, &nestedError{"Name", err} - } - if h.Type, newOff, err = unpackType(msg, newOff); err != nil { - return off, &nestedError{"Type", err} - } - if h.Class, newOff, err = unpackClass(msg, newOff); err != nil { - return off, &nestedError{"Class", err} - } - if h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil { - return off, &nestedError{"TTL", err} - } - if h.Length, newOff, err = unpackUint16(msg, newOff); err != nil { - return off, &nestedError{"Length", err} - } - return newOff, nil -} - -func (h *ResourceHeader) fixLen(msg []byte, length []byte, preLen int) error { - conLen := len(msg) - preLen - if conLen > int(^uint16(0)) { - return errResTooLong - } - - // Fill in the length now that we know how long the content is. 
- packUint16(length[:0], uint16(conLen)) - h.Length = uint16(conLen) - - return nil -} - -func skipResource(msg []byte, off int) (int, error) { - newOff, err := skipName(msg, off) - if err != nil { - return off, &nestedError{"Name", err} - } - if newOff, err = skipType(msg, newOff); err != nil { - return off, &nestedError{"Type", err} - } - if newOff, err = skipClass(msg, newOff); err != nil { - return off, &nestedError{"Class", err} - } - if newOff, err = skipUint32(msg, newOff); err != nil { - return off, &nestedError{"TTL", err} - } - length, newOff, err := unpackUint16(msg, newOff) - if err != nil { - return off, &nestedError{"Length", err} - } - if newOff += int(length); newOff > len(msg) { - return off, errResourceLen - } - return newOff, nil -} - -func packUint16(msg []byte, field uint16) []byte { - return append(msg, byte(field>>8), byte(field)) -} - -func unpackUint16(msg []byte, off int) (uint16, int, error) { - if off+uint16Len > len(msg) { - return 0, off, errBaseLen - } - return uint16(msg[off])<<8 | uint16(msg[off+1]), off + uint16Len, nil -} - -func skipUint16(msg []byte, off int) (int, error) { - if off+uint16Len > len(msg) { - return off, errBaseLen - } - return off + uint16Len, nil -} - -func packType(msg []byte, field Type) []byte { - return packUint16(msg, uint16(field)) -} - -func unpackType(msg []byte, off int) (Type, int, error) { - t, o, err := unpackUint16(msg, off) - return Type(t), o, err -} - -func skipType(msg []byte, off int) (int, error) { - return skipUint16(msg, off) -} - -func packClass(msg []byte, field Class) []byte { - return packUint16(msg, uint16(field)) -} - -func unpackClass(msg []byte, off int) (Class, int, error) { - c, o, err := unpackUint16(msg, off) - return Class(c), o, err -} - -func skipClass(msg []byte, off int) (int, error) { - return skipUint16(msg, off) -} - -func packUint32(msg []byte, field uint32) []byte { - return append( - msg, - byte(field>>24), - byte(field>>16), - byte(field>>8), - byte(field), - ) -} - -func unpackUint32(msg []byte, off int) (uint32, int, error) { - if off+uint32Len > len(msg) { - return 0, off, errBaseLen - } - v := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3]) - return v, off + uint32Len, nil -} - -func skipUint32(msg []byte, off int) (int, error) { - if off+uint32Len > len(msg) { - return off, errBaseLen - } - return off + uint32Len, nil -} - -func packText(msg []byte, field string) []byte { - for len(field) > 0 { - l := len(field) - if l > 255 { - l = 255 - } - msg = append(msg, byte(l)) - msg = append(msg, field[:l]...) - field = field[l:] - } - return msg -} - -func unpackText(msg []byte, off int) (string, int, error) { - if off >= len(msg) { - return "", off, errBaseLen - } - beginOff := off + 1 - endOff := beginOff + int(msg[off]) - if endOff > len(msg) { - return "", off, errCalcLen - } - return string(msg[beginOff:endOff]), endOff, nil -} - -func skipText(msg []byte, off int) (int, error) { - if off >= len(msg) { - return off, errBaseLen - } - endOff := off + 1 + int(msg[off]) - if endOff > len(msg) { - return off, errCalcLen - } - return endOff, nil -} - -func packBytes(msg []byte, field []byte) []byte { - return append(msg, field...) 
-} - -func unpackBytes(msg []byte, off int, field []byte) (int, error) { - newOff := off + len(field) - if newOff > len(msg) { - return off, errBaseLen - } - copy(field, msg[off:newOff]) - return newOff, nil -} - -func skipBytes(msg []byte, off int, field []byte) (int, error) { - newOff := off + len(field) - if newOff > len(msg) { - return off, errBaseLen - } - return newOff, nil -} - -const nameLen = 255 - -// A Name is a non-encoded domain name. It is used instead of strings to avoid -// allocations. -type Name struct { - Data [nameLen]byte - Length uint8 -} - -// NewName creates a new Name from a string. -func NewName(name string) (Name, error) { - if len([]byte(name)) > nameLen { - return Name{}, errCalcLen - } - n := Name{Length: uint8(len(name))} - copy(n.Data[:], []byte(name)) - return n, nil -} - -func (n Name) String() string { - return string(n.Data[:n.Length]) -} - -// pack packs a domain name. -// -// Domain names are a sequence of counted strings split at the dots. They end -// with a zero-length string. Compression can be used to reuse domain suffixes. -// -// The compression map will be updated with new domain suffixes. If compression -// is nil, compression will not be used. -func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) { - oldMsg := msg - - // Add a trailing dot to canonicalize name. - if n.Length == 0 || n.Data[n.Length-1] != '.' { - return oldMsg, errNonCanonicalName - } - - // Allow root domain. - if n.Data[0] == '.' && n.Length == 1 { - return append(msg, 0), nil - } - - // Emit sequence of counted strings, chopping at dots. - for i, begin := 0, 0; i < int(n.Length); i++ { - // Check for the end of the segment. - if n.Data[i] == '.' { - // The two most significant bits have special meaning. - // It isn't allowed for segments to be long enough to - // need them. - if i-begin >= 1<<6 { - return oldMsg, errSegTooLong - } - - // Segments must have a non-zero length. - if i-begin == 0 { - return oldMsg, errZeroSegLen - } - - msg = append(msg, byte(i-begin)) - - for j := begin; j < i; j++ { - msg = append(msg, n.Data[j]) - } - - begin = i + 1 - continue - } - - // We can only compress domain suffixes starting with a new - // segment. A pointer is two bytes with the two most significant - // bits set to 1 to indicate that it is a pointer. - if (i == 0 || n.Data[i-1] == '.') && compression != nil { - if ptr, ok := compression[string(n.Data[i:])]; ok { - // Hit. Emit a pointer instead of the rest of - // the domain. - return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil - } - - // Miss. Add the suffix to the compression table if the - // offset can be stored in the available 14 bytes. - if len(msg) <= int(^uint16(0)>>2) { - compression[string(n.Data[i:])] = len(msg) - } - } - } - return append(msg, 0), nil -} - -// unpack unpacks a domain name. -func (n *Name) unpack(msg []byte, off int) (int, error) { - // currOff is the current working offset. - currOff := off - - // newOff is the offset where the next record will start. Pointers lead - // to data that belongs to other names and thus doesn't count towards to - // the usage of this name. - newOff := off - - // ptr is the number of pointers followed. - var ptr int - - // Name is a slice representation of the name data. - name := n.Data[:0] - -Loop: - for { - if currOff >= len(msg) { - return off, errBaseLen - } - c := int(msg[currOff]) - currOff++ - switch c & 0xC0 { - case 0x00: // String segment - if c == 0x00 { - // A zero length signals the end of the name. 
- break Loop - } - endOff := currOff + c - if endOff > len(msg) { - return off, errCalcLen - } - name = append(name, msg[currOff:endOff]...) - name = append(name, '.') - currOff = endOff - case 0xC0: // Pointer - if currOff >= len(msg) { - return off, errInvalidPtr - } - c1 := msg[currOff] - currOff++ - if ptr == 0 { - newOff = currOff - } - // Don't follow too many pointers, maybe there's a loop. - if ptr++; ptr > 10 { - return off, errTooManyPtr - } - currOff = (c^0xC0)<<8 | int(c1) - default: - // Prefixes 0x80 and 0x40 are reserved. - return off, errReserved - } - } - if len(name) == 0 { - name = append(name, '.') - } - if len(name) > len(n.Data) { - return off, errCalcLen - } - n.Length = uint8(len(name)) - if ptr == 0 { - newOff = currOff - } - return newOff, nil -} - -func skipName(msg []byte, off int) (int, error) { - // newOff is the offset where the next record will start. Pointers lead - // to data that belongs to other names and thus doesn't count towards to - // the usage of this name. - newOff := off - -Loop: - for { - if newOff >= len(msg) { - return off, errBaseLen - } - c := int(msg[newOff]) - newOff++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // A zero length signals the end of the name. - break Loop - } - // literal string - newOff += c - if newOff > len(msg) { - return off, errCalcLen - } - case 0xC0: - // Pointer to somewhere else in msg. - - // Pointers are two bytes. - newOff++ - - // Don't follow the pointer as the data here has ended. - break Loop - default: - // Prefixes 0x80 and 0x40 are reserved. - return off, errReserved - } - } - - return newOff, nil -} - -// A Question is a DNS query. -type Question struct { - Name Name - Type Type - Class Class -} - -func (q *Question) pack(msg []byte, compression map[string]int) ([]byte, error) { - msg, err := q.Name.pack(msg, compression) - if err != nil { - return msg, &nestedError{"Name", err} - } - msg = packType(msg, q.Type) - return packClass(msg, q.Class), nil -} - -func unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, int, error) { - var ( - r ResourceBody - err error - name string - ) - switch hdr.Type { - case TypeA: - var rb AResource - rb, err = unpackAResource(msg, off) - r = &rb - name = "A" - case TypeNS: - var rb NSResource - rb, err = unpackNSResource(msg, off) - r = &rb - name = "NS" - case TypeCNAME: - var rb CNAMEResource - rb, err = unpackCNAMEResource(msg, off) - r = &rb - name = "CNAME" - case TypeSOA: - var rb SOAResource - rb, err = unpackSOAResource(msg, off) - r = &rb - name = "SOA" - case TypePTR: - var rb PTRResource - rb, err = unpackPTRResource(msg, off) - r = &rb - name = "PTR" - case TypeMX: - var rb MXResource - rb, err = unpackMXResource(msg, off) - r = &rb - name = "MX" - case TypeTXT: - var rb TXTResource - rb, err = unpackTXTResource(msg, off, hdr.Length) - r = &rb - name = "TXT" - case TypeAAAA: - var rb AAAAResource - rb, err = unpackAAAAResource(msg, off) - r = &rb - name = "AAAA" - case TypeSRV: - var rb SRVResource - rb, err = unpackSRVResource(msg, off) - r = &rb - name = "SRV" - } - if err != nil { - return nil, off, &nestedError{name + " record", err} - } - if r == nil { - return nil, off, errors.New("invalid resource type: " + string(hdr.Type+'0')) - } - return r, off + int(hdr.Length), nil -} - -// A CNAMEResource is a CNAME Resource record. 
-type CNAMEResource struct { - CNAME Name -} - -func (r *CNAMEResource) realType() Type { - return TypeCNAME -} - -func (r *CNAMEResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.CNAME.pack(msg, compression) -} - -func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) { - var cname Name - if _, err := cname.unpack(msg, off); err != nil { - return CNAMEResource{}, err - } - return CNAMEResource{cname}, nil -} - -// An MXResource is an MX Resource record. -type MXResource struct { - Pref uint16 - MX Name -} - -func (r *MXResource) realType() Type { - return TypeMX -} - -func (r *MXResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - oldMsg := msg - msg = packUint16(msg, r.Pref) - msg, err := r.MX.pack(msg, compression) - if err != nil { - return oldMsg, &nestedError{"MXResource.MX", err} - } - return msg, nil -} - -func unpackMXResource(msg []byte, off int) (MXResource, error) { - pref, off, err := unpackUint16(msg, off) - if err != nil { - return MXResource{}, &nestedError{"Pref", err} - } - var mx Name - if _, err := mx.unpack(msg, off); err != nil { - return MXResource{}, &nestedError{"MX", err} - } - return MXResource{pref, mx}, nil -} - -// An NSResource is an NS Resource record. -type NSResource struct { - NS Name -} - -func (r *NSResource) realType() Type { - return TypeNS -} - -func (r *NSResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.NS.pack(msg, compression) -} - -func unpackNSResource(msg []byte, off int) (NSResource, error) { - var ns Name - if _, err := ns.unpack(msg, off); err != nil { - return NSResource{}, err - } - return NSResource{ns}, nil -} - -// A PTRResource is a PTR Resource record. -type PTRResource struct { - PTR Name -} - -func (r *PTRResource) realType() Type { - return TypePTR -} - -func (r *PTRResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.PTR.pack(msg, compression) -} - -func unpackPTRResource(msg []byte, off int) (PTRResource, error) { - var ptr Name - if _, err := ptr.unpack(msg, off); err != nil { - return PTRResource{}, err - } - return PTRResource{ptr}, nil -} - -// An SOAResource is an SOA Resource record. -type SOAResource struct { - NS Name - MBox Name - Serial uint32 - Refresh uint32 - Retry uint32 - Expire uint32 - - // MinTTL the is the default TTL of Resources records which did not - // contain a TTL value and the TTL of negative responses. 
(RFC 2308 - // Section 4) - MinTTL uint32 -} - -func (r *SOAResource) realType() Type { - return TypeSOA -} - -func (r *SOAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - oldMsg := msg - msg, err := r.NS.pack(msg, compression) - if err != nil { - return oldMsg, &nestedError{"SOAResource.NS", err} - } - msg, err = r.MBox.pack(msg, compression) - if err != nil { - return oldMsg, &nestedError{"SOAResource.MBox", err} - } - msg = packUint32(msg, r.Serial) - msg = packUint32(msg, r.Refresh) - msg = packUint32(msg, r.Retry) - msg = packUint32(msg, r.Expire) - return packUint32(msg, r.MinTTL), nil -} - -func unpackSOAResource(msg []byte, off int) (SOAResource, error) { - var ns Name - off, err := ns.unpack(msg, off) - if err != nil { - return SOAResource{}, &nestedError{"NS", err} - } - var mbox Name - if off, err = mbox.unpack(msg, off); err != nil { - return SOAResource{}, &nestedError{"MBox", err} - } - serial, off, err := unpackUint32(msg, off) - if err != nil { - return SOAResource{}, &nestedError{"Serial", err} - } - refresh, off, err := unpackUint32(msg, off) - if err != nil { - return SOAResource{}, &nestedError{"Refresh", err} - } - retry, off, err := unpackUint32(msg, off) - if err != nil { - return SOAResource{}, &nestedError{"Retry", err} - } - expire, off, err := unpackUint32(msg, off) - if err != nil { - return SOAResource{}, &nestedError{"Expire", err} - } - minTTL, _, err := unpackUint32(msg, off) - if err != nil { - return SOAResource{}, &nestedError{"MinTTL", err} - } - return SOAResource{ns, mbox, serial, refresh, retry, expire, minTTL}, nil -} - -// A TXTResource is a TXT Resource record. -type TXTResource struct { - Txt string // Not a domain name. -} - -func (r *TXTResource) realType() Type { - return TypeTXT -} - -func (r *TXTResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return packText(msg, r.Txt), nil -} - -func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) { - var txt string - for n := uint16(0); n < length; { - var t string - var err error - if t, off, err = unpackText(msg, off); err != nil { - return TXTResource{}, &nestedError{"text", err} - } - // Check if we got too many bytes. - if length-n < uint16(len(t))+1 { - return TXTResource{}, errCalcLen - } - n += uint16(len(t)) + 1 - txt += t - } - return TXTResource{txt}, nil -} - -// An SRVResource is an SRV Resource record. -type SRVResource struct { - Priority uint16 - Weight uint16 - Port uint16 - Target Name // Not compressed as per RFC 2782. 
-} - -func (r *SRVResource) realType() Type { - return TypeSRV -} - -func (r *SRVResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - oldMsg := msg - msg = packUint16(msg, r.Priority) - msg = packUint16(msg, r.Weight) - msg = packUint16(msg, r.Port) - msg, err := r.Target.pack(msg, nil) - if err != nil { - return oldMsg, &nestedError{"SRVResource.Target", err} - } - return msg, nil -} - -func unpackSRVResource(msg []byte, off int) (SRVResource, error) { - priority, off, err := unpackUint16(msg, off) - if err != nil { - return SRVResource{}, &nestedError{"Priority", err} - } - weight, off, err := unpackUint16(msg, off) - if err != nil { - return SRVResource{}, &nestedError{"Weight", err} - } - port, off, err := unpackUint16(msg, off) - if err != nil { - return SRVResource{}, &nestedError{"Port", err} - } - var target Name - if _, err := target.unpack(msg, off); err != nil { - return SRVResource{}, &nestedError{"Target", err} - } - return SRVResource{priority, weight, port, target}, nil -} - -// An AResource is an A Resource record. -type AResource struct { - A [4]byte -} - -func (r *AResource) realType() Type { - return TypeA -} - -func (r *AResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return packBytes(msg, r.A[:]), nil -} - -func unpackAResource(msg []byte, off int) (AResource, error) { - var a [4]byte - if _, err := unpackBytes(msg, off, a[:]); err != nil { - return AResource{}, err - } - return AResource{a}, nil -} - -// An AAAAResource is an AAAA Resource record. -type AAAAResource struct { - AAAA [16]byte -} - -func (r *AAAAResource) realType() Type { - return TypeAAAA -} - -func (r *AAAAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return packBytes(msg, r.AAAA[:]), nil -} - -func unpackAAAAResource(msg []byte, off int) (AAAAResource, error) { - var aaaa [16]byte - if _, err := unpackBytes(msg, off, aaaa[:]); err != nil { - return AAAAResource{}, err - } - return AAAAResource{aaaa}, nil -} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go deleted file mode 100644 index 9295d36c..00000000 --- a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go +++ /dev/null @@ -1,1116 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
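Each deleted resource type above pairs a pack method with an unpack helper; the deleted tests that follow exercise them only through the package's public Message.Pack and Message.Unpack. A minimal sketch of that public API, assuming the golang.org/x/net/dns/dnsmessage import path as vendored in this tree (types and fields are the ones shown in the deleted code; this is an illustration, not part of the patch):

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/dns/dnsmessage"
)

func main() {
	// NewName requires a fully qualified (dot-terminated) name, as the
	// deleted TestNamePackUnpack cases show.
	name, err := dnsmessage.NewName("example.com.")
	if err != nil {
		log.Fatal(err)
	}
	msg := dnsmessage.Message{
		Header: dnsmessage.Header{Response: true, Authoritative: true},
		Questions: []dnsmessage.Question{{
			Name:  name,
			Type:  dnsmessage.TypeA,
			Class: dnsmessage.ClassINET,
		}},
		Answers: []dnsmessage.Resource{{
			Header: dnsmessage.ResourceHeader{Name: name, Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET},
			Body:   &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}},
		}},
	}
	buf, err := msg.Pack() // wire format; names are compressed where allowed
	if err != nil {
		log.Fatal(err)
	}
	var parsed dnsmessage.Message
	if err := parsed.Unpack(buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes, %d answer(s)\n", len(buf), len(parsed.Answers))
}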
- -package dnsmessage - -import ( - "bytes" - "fmt" - "reflect" - "testing" -) - -func mustNewName(name string) Name { - n, err := NewName(name) - if err != nil { - panic(err) - } - return n -} - -func (m *Message) String() string { - s := fmt.Sprintf("Message: %#v\n", &m.Header) - if len(m.Questions) > 0 { - s += "-- Questions\n" - for _, q := range m.Questions { - s += fmt.Sprintf("%#v\n", q) - } - } - if len(m.Answers) > 0 { - s += "-- Answers\n" - for _, a := range m.Answers { - s += fmt.Sprintf("%#v\n", a) - } - } - if len(m.Authorities) > 0 { - s += "-- Authorities\n" - for _, ns := range m.Authorities { - s += fmt.Sprintf("%#v\n", ns) - } - } - if len(m.Additionals) > 0 { - s += "-- Additionals\n" - for _, e := range m.Additionals { - s += fmt.Sprintf("%#v\n", e) - } - } - return s -} - -func TestNameString(t *testing.T) { - want := "foo" - name := mustNewName(want) - if got := fmt.Sprint(name); got != want { - t.Errorf("got fmt.Sprint(%#v) = %s, want = %s", name, got, want) - } -} - -func TestQuestionPackUnpack(t *testing.T) { - want := Question{ - Name: mustNewName("."), - Type: TypeA, - Class: ClassINET, - } - buf, err := want.pack(make([]byte, 1, 50), map[string]int{}) - if err != nil { - t.Fatal("Packing failed:", err) - } - var p Parser - p.msg = buf - p.header.questions = 1 - p.section = sectionQuestions - p.off = 1 - got, err := p.Question() - if err != nil { - t.Fatalf("Unpacking failed: %v\n%s", err, string(buf[1:])) - } - if p.off != len(buf) { - t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", p.off, len(buf)) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("Got = %+v, want = %+v", got, want) - } -} - -func TestName(t *testing.T) { - tests := []string{ - "", - ".", - "google..com", - "google.com", - "google..com.", - "google.com.", - ".google.com.", - "www..google.com.", - "www.google.com.", - } - - for _, test := range tests { - n, err := NewName(test) - if err != nil { - t.Errorf("Creating name for %q: %v", test, err) - continue - } - if ns := n.String(); ns != test { - t.Errorf("Got %#v.String() = %q, want = %q", n, ns, test) - continue - } - } -} - -func TestNamePackUnpack(t *testing.T) { - tests := []struct { - in string - want string - err error - }{ - {"", "", errNonCanonicalName}, - {".", ".", nil}, - {"google..com", "", errNonCanonicalName}, - {"google.com", "", errNonCanonicalName}, - {"google..com.", "", errZeroSegLen}, - {"google.com.", "google.com.", nil}, - {".google.com.", "", errZeroSegLen}, - {"www..google.com.", "", errZeroSegLen}, - {"www.google.com.", "www.google.com.", nil}, - } - - for _, test := range tests { - in := mustNewName(test.in) - want := mustNewName(test.want) - buf, err := in.pack(make([]byte, 0, 30), map[string]int{}) - if err != test.err { - t.Errorf("Packing of %q: got err = %v, want err = %v", test.in, err, test.err) - continue - } - if test.err != nil { - continue - } - var got Name - n, err := got.unpack(buf, 0) - if err != nil { - t.Errorf("Unpacking for %q failed: %v", test.in, err) - continue - } - if n != len(buf) { - t.Errorf( - "Unpacked different amount than packed for %q: got n = %d, want = %d", - test.in, - n, - len(buf), - ) - } - if got != want { - t.Errorf("Unpacking packing of %q: got = %#v, want = %#v", test.in, got, want) - } - } -} - -func checkErrorPrefix(err error, prefix string) bool { - e, ok := err.(*nestedError) - return ok && e.s == prefix -} - -func TestHeaderUnpackError(t *testing.T) { - wants := []string{ - "id", - "bits", - "questions", - "answers", - "authorities", - 
"additionals", - } - var buf []byte - var h header - for _, want := range wants { - n, err := h.unpack(buf, 0) - if n != 0 || !checkErrorPrefix(err, want) { - t.Errorf("got h.unpack([%d]byte, 0) = %d, %v, want = 0, %s", len(buf), n, err, want) - } - buf = append(buf, 0, 0) - } -} - -func TestParserStart(t *testing.T) { - const want = "unpacking header" - var p Parser - for i := 0; i <= 1; i++ { - _, err := p.Start([]byte{}) - if !checkErrorPrefix(err, want) { - t.Errorf("got p.Start(nil) = _, %v, want = _, %s", err, want) - } - } -} - -func TestResourceNotStarted(t *testing.T) { - tests := []struct { - name string - fn func(*Parser) error - }{ - {"CNAMEResource", func(p *Parser) error { _, err := p.CNAMEResource(); return err }}, - {"MXResource", func(p *Parser) error { _, err := p.MXResource(); return err }}, - {"NSResource", func(p *Parser) error { _, err := p.NSResource(); return err }}, - {"PTRResource", func(p *Parser) error { _, err := p.PTRResource(); return err }}, - {"SOAResource", func(p *Parser) error { _, err := p.SOAResource(); return err }}, - {"TXTResource", func(p *Parser) error { _, err := p.TXTResource(); return err }}, - {"SRVResource", func(p *Parser) error { _, err := p.SRVResource(); return err }}, - {"AResource", func(p *Parser) error { _, err := p.AResource(); return err }}, - {"AAAAResource", func(p *Parser) error { _, err := p.AAAAResource(); return err }}, - } - - for _, test := range tests { - if err := test.fn(&Parser{}); err != ErrNotStarted { - t.Errorf("got _, %v = p.%s(), want = _, %v", err, test.name, ErrNotStarted) - } - } -} - -func TestDNSPackUnpack(t *testing.T) { - wants := []Message{ - { - Questions: []Question{ - { - Name: mustNewName("."), - Type: TypeAAAA, - Class: ClassINET, - }, - }, - Answers: []Resource{}, - Authorities: []Resource{}, - Additionals: []Resource{}, - }, - largeTestMsg(), - } - for i, want := range wants { - b, err := want.Pack() - if err != nil { - t.Fatalf("%d: packing failed: %v", i, err) - } - var got Message - err = got.Unpack(b) - if err != nil { - t.Fatalf("%d: unpacking failed: %v", i, err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("%d: got = %+v, want = %+v", i, &got, &want) - } - } -} - -func TestSkipAll(t *testing.T) { - msg := largeTestMsg() - buf, err := msg.Pack() - if err != nil { - t.Fatal("Packing large test message:", err) - } - var p Parser - if _, err := p.Start(buf); err != nil { - t.Fatal(err) - } - - tests := []struct { - name string - f func() error - }{ - {"SkipAllQuestions", p.SkipAllQuestions}, - {"SkipAllAnswers", p.SkipAllAnswers}, - {"SkipAllAuthorities", p.SkipAllAuthorities}, - {"SkipAllAdditionals", p.SkipAllAdditionals}, - } - for _, test := range tests { - for i := 1; i <= 3; i++ { - if err := test.f(); err != nil { - t.Errorf("Call #%d to %s(): %v", i, test.name, err) - } - } - } -} - -func TestSkipEach(t *testing.T) { - msg := smallTestMsg() - - buf, err := msg.Pack() - if err != nil { - t.Fatal("Packing test message:", err) - } - var p Parser - if _, err := p.Start(buf); err != nil { - t.Fatal(err) - } - - tests := []struct { - name string - f func() error - }{ - {"SkipQuestion", p.SkipQuestion}, - {"SkipAnswer", p.SkipAnswer}, - {"SkipAuthority", p.SkipAuthority}, - {"SkipAdditional", p.SkipAdditional}, - } - for _, test := range tests { - if err := test.f(); err != nil { - t.Errorf("First call: got %s() = %v, want = %v", test.name, err, nil) - } - if err := test.f(); err != ErrSectionDone { - t.Errorf("Second call: got %s() = %v, want = %v", test.name, err, ErrSectionDone) - } - } 
-} - -func TestSkipAfterRead(t *testing.T) { - msg := smallTestMsg() - - buf, err := msg.Pack() - if err != nil { - t.Fatal("Packing test message:", err) - } - var p Parser - if _, err := p.Start(buf); err != nil { - t.Fatal(err) - } - - tests := []struct { - name string - skip func() error - read func() error - }{ - {"Question", p.SkipQuestion, func() error { _, err := p.Question(); return err }}, - {"Answer", p.SkipAnswer, func() error { _, err := p.Answer(); return err }}, - {"Authority", p.SkipAuthority, func() error { _, err := p.Authority(); return err }}, - {"Additional", p.SkipAdditional, func() error { _, err := p.Additional(); return err }}, - } - for _, test := range tests { - if err := test.read(); err != nil { - t.Errorf("Got %s() = _, %v, want = _, %v", test.name, err, nil) - } - if err := test.skip(); err != ErrSectionDone { - t.Errorf("Got Skip%s() = %v, want = %v", test.name, err, ErrSectionDone) - } - } -} - -func TestSkipNotStarted(t *testing.T) { - var p Parser - - tests := []struct { - name string - f func() error - }{ - {"SkipAllQuestions", p.SkipAllQuestions}, - {"SkipAllAnswers", p.SkipAllAnswers}, - {"SkipAllAuthorities", p.SkipAllAuthorities}, - {"SkipAllAdditionals", p.SkipAllAdditionals}, - } - for _, test := range tests { - if err := test.f(); err != ErrNotStarted { - t.Errorf("Got %s() = %v, want = %v", test.name, err, ErrNotStarted) - } - } -} - -func TestTooManyRecords(t *testing.T) { - const recs = int(^uint16(0)) + 1 - tests := []struct { - name string - msg Message - want error - }{ - { - "Questions", - Message{ - Questions: make([]Question, recs), - }, - errTooManyQuestions, - }, - { - "Answers", - Message{ - Answers: make([]Resource, recs), - }, - errTooManyAnswers, - }, - { - "Authorities", - Message{ - Authorities: make([]Resource, recs), - }, - errTooManyAuthorities, - }, - { - "Additionals", - Message{ - Additionals: make([]Resource, recs), - }, - errTooManyAdditionals, - }, - } - - for _, test := range tests { - if _, got := test.msg.Pack(); got != test.want { - t.Errorf("Packing %d %s: got = %v, want = %v", recs, test.name, got, test.want) - } - } -} - -func TestVeryLongTxt(t *testing.T) { - want := Resource{ - ResourceHeader{ - Name: mustNewName("foo.bar.example.com."), - Type: TypeTXT, - Class: ClassINET, - }, - &TXTResource{loremIpsum}, - } - buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}) - if err != nil { - t.Fatal("Packing failed:", err) - } - var got Resource - off, err := got.Header.unpack(buf, 0) - if err != nil { - t.Fatal("Unpacking ResourceHeader failed:", err) - } - body, n, err := unpackResourceBody(buf, off, got.Header) - if err != nil { - t.Fatal("Unpacking failed:", err) - } - got.Body = body - if n != len(buf) { - t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", n, len(buf)) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("Got = %#v, want = %#v", got, want) - } -} - -func TestStartError(t *testing.T) { - tests := []struct { - name string - fn func(*Builder) error - }{ - {"Questions", func(b *Builder) error { return b.StartQuestions() }}, - {"Answers", func(b *Builder) error { return b.StartAnswers() }}, - {"Authorities", func(b *Builder) error { return b.StartAuthorities() }}, - {"Additionals", func(b *Builder) error { return b.StartAdditionals() }}, - } - - envs := []struct { - name string - fn func() *Builder - want error - }{ - {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, - {"sectionDone", func() *Builder { return 
&Builder{section: sectionDone} }, ErrSectionDone}, - } - - for _, env := range envs { - for _, test := range tests { - if got := test.fn(env.fn()); got != env.want { - t.Errorf("got Builder{%s}.Start%s = %v, want = %v", env.name, test.name, got, env.want) - } - } - } -} - -func TestBuilderResourceError(t *testing.T) { - tests := []struct { - name string - fn func(*Builder) error - }{ - {"CNAMEResource", func(b *Builder) error { return b.CNAMEResource(ResourceHeader{}, CNAMEResource{}) }}, - {"MXResource", func(b *Builder) error { return b.MXResource(ResourceHeader{}, MXResource{}) }}, - {"NSResource", func(b *Builder) error { return b.NSResource(ResourceHeader{}, NSResource{}) }}, - {"PTRResource", func(b *Builder) error { return b.PTRResource(ResourceHeader{}, PTRResource{}) }}, - {"SOAResource", func(b *Builder) error { return b.SOAResource(ResourceHeader{}, SOAResource{}) }}, - {"TXTResource", func(b *Builder) error { return b.TXTResource(ResourceHeader{}, TXTResource{}) }}, - {"SRVResource", func(b *Builder) error { return b.SRVResource(ResourceHeader{}, SRVResource{}) }}, - {"AResource", func(b *Builder) error { return b.AResource(ResourceHeader{}, AResource{}) }}, - {"AAAAResource", func(b *Builder) error { return b.AAAAResource(ResourceHeader{}, AAAAResource{}) }}, - } - - envs := []struct { - name string - fn func() *Builder - want error - }{ - {"sectionNotStarted", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted}, - {"sectionHeader", func() *Builder { return &Builder{section: sectionHeader} }, ErrNotStarted}, - {"sectionQuestions", func() *Builder { return &Builder{section: sectionQuestions} }, ErrNotStarted}, - {"sectionDone", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone}, - } - - for _, env := range envs { - for _, test := range tests { - if got := test.fn(env.fn()); got != env.want { - t.Errorf("got Builder{%s}.%s = %v, want = %v", env.name, test.name, got, env.want) - } - } - } -} - -func TestFinishError(t *testing.T) { - var b Builder - want := ErrNotStarted - if _, got := b.Finish(); got != want { - t.Errorf("got Builder{}.Finish() = %v, want = %v", got, want) - } -} - -func TestBuilder(t *testing.T) { - msg := largeTestMsg() - want, err := msg.Pack() - if err != nil { - t.Fatal("Packing without builder:", err) - } - - var b Builder - b.Start(nil, msg.Header) - - if err := b.StartQuestions(); err != nil { - t.Fatal("b.StartQuestions():", err) - } - for _, q := range msg.Questions { - if err := b.Question(q); err != nil { - t.Fatalf("b.Question(%#v): %v", q, err) - } - } - - if err := b.StartAnswers(); err != nil { - t.Fatal("b.StartAnswers():", err) - } - for _, a := range msg.Answers { - switch a.Header.Type { - case TypeA: - if err := b.AResource(a.Header, *a.Body.(*AResource)); err != nil { - t.Fatalf("b.AResource(%#v): %v", a, err) - } - case TypeNS: - if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { - t.Fatalf("b.NSResource(%#v): %v", a, err) - } - case TypeCNAME: - if err := b.CNAMEResource(a.Header, *a.Body.(*CNAMEResource)); err != nil { - t.Fatalf("b.CNAMEResource(%#v): %v", a, err) - } - case TypeSOA: - if err := b.SOAResource(a.Header, *a.Body.(*SOAResource)); err != nil { - t.Fatalf("b.SOAResource(%#v): %v", a, err) - } - case TypePTR: - if err := b.PTRResource(a.Header, *a.Body.(*PTRResource)); err != nil { - t.Fatalf("b.PTRResource(%#v): %v", a, err) - } - case TypeMX: - if err := b.MXResource(a.Header, *a.Body.(*MXResource)); err != nil { - t.Fatalf("b.MXResource(%#v): %v", a, 
err) - } - case TypeTXT: - if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { - t.Fatalf("b.TXTResource(%#v): %v", a, err) - } - case TypeAAAA: - if err := b.AAAAResource(a.Header, *a.Body.(*AAAAResource)); err != nil { - t.Fatalf("b.AAAAResource(%#v): %v", a, err) - } - case TypeSRV: - if err := b.SRVResource(a.Header, *a.Body.(*SRVResource)); err != nil { - t.Fatalf("b.SRVResource(%#v): %v", a, err) - } - } - } - - if err := b.StartAuthorities(); err != nil { - t.Fatal("b.StartAuthorities():", err) - } - for _, a := range msg.Authorities { - if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { - t.Fatalf("b.NSResource(%#v): %v", a, err) - } - } - - if err := b.StartAdditionals(); err != nil { - t.Fatal("b.StartAdditionals():", err) - } - for _, a := range msg.Additionals { - if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { - t.Fatalf("b.TXTResource(%#v): %v", a, err) - } - } - - got, err := b.Finish() - if err != nil { - t.Fatal("b.Finish():", err) - } - if !bytes.Equal(got, want) { - t.Fatalf("Got from Builder: %#v\nwant = %#v", got, want) - } -} - -func TestResourcePack(t *testing.T) { - for _, tt := range []struct { - m Message - err error - }{ - { - Message{ - Questions: []Question{ - { - Name: mustNewName("."), - Type: TypeAAAA, - Class: ClassINET, - }, - }, - Answers: []Resource{{ResourceHeader{}, nil}}, - }, - &nestedError{"packing Answer", errNilResouceBody}, - }, - { - Message{ - Questions: []Question{ - { - Name: mustNewName("."), - Type: TypeAAAA, - Class: ClassINET, - }, - }, - Authorities: []Resource{{ResourceHeader{}, (*NSResource)(nil)}}, - }, - &nestedError{"packing Authority", - &nestedError{"ResourceHeader", - &nestedError{"Name", errNonCanonicalName}, - }, - }, - }, - { - Message{ - Questions: []Question{ - { - Name: mustNewName("."), - Type: TypeA, - Class: ClassINET, - }, - }, - Additionals: []Resource{{ResourceHeader{}, nil}}, - }, - &nestedError{"packing Additional", errNilResouceBody}, - }, - } { - _, err := tt.m.Pack() - if !reflect.DeepEqual(err, tt.err) { - t.Errorf("got %v for %v; want %v", err, tt.m, tt.err) - } - } -} - -func BenchmarkParsing(b *testing.B) { - b.ReportAllocs() - - name := mustNewName("foo.bar.example.com.") - msg := Message{ - Header: Header{Response: true, Authoritative: true}, - Questions: []Question{ - { - Name: name, - Type: TypeA, - Class: ClassINET, - }, - }, - Answers: []Resource{ - { - ResourceHeader{ - Name: name, - Class: ClassINET, - }, - &AResource{[4]byte{}}, - }, - { - ResourceHeader{ - Name: name, - Class: ClassINET, - }, - &AAAAResource{[16]byte{}}, - }, - { - ResourceHeader{ - Name: name, - Class: ClassINET, - }, - &CNAMEResource{name}, - }, - { - ResourceHeader{ - Name: name, - Class: ClassINET, - }, - &NSResource{name}, - }, - }, - } - - buf, err := msg.Pack() - if err != nil { - b.Fatal("msg.Pack():", err) - } - - for i := 0; i < b.N; i++ { - var p Parser - if _, err := p.Start(buf); err != nil { - b.Fatal("p.Start(buf):", err) - } - - for { - _, err := p.Question() - if err == ErrSectionDone { - break - } - if err != nil { - b.Fatal("p.Question():", err) - } - } - - for { - h, err := p.AnswerHeader() - if err == ErrSectionDone { - break - } - if err != nil { - panic(err) - } - - switch h.Type { - case TypeA: - if _, err := p.AResource(); err != nil { - b.Fatal("p.AResource():", err) - } - case TypeAAAA: - if _, err := p.AAAAResource(); err != nil { - b.Fatal("p.AAAAResource():", err) - } - case TypeCNAME: - if _, err := p.CNAMEResource(); err != nil { - 
b.Fatal("p.CNAMEResource():", err) - } - case TypeNS: - if _, err := p.NSResource(); err != nil { - b.Fatal("p.NSResource():", err) - } - default: - b.Fatalf("unknown type: %T", h) - } - } - } -} - -func BenchmarkBuilding(b *testing.B) { - b.ReportAllocs() - - name := mustNewName("foo.bar.example.com.") - buf := make([]byte, 0, packStartingCap) - - for i := 0; i < b.N; i++ { - var bld Builder - bld.StartWithoutCompression(buf, Header{Response: true, Authoritative: true}) - - if err := bld.StartQuestions(); err != nil { - b.Fatal("bld.StartQuestions():", err) - } - q := Question{ - Name: name, - Type: TypeA, - Class: ClassINET, - } - if err := bld.Question(q); err != nil { - b.Fatalf("bld.Question(%+v): %v", q, err) - } - - hdr := ResourceHeader{ - Name: name, - Class: ClassINET, - } - if err := bld.StartAnswers(); err != nil { - b.Fatal("bld.StartQuestions():", err) - } - - ar := AResource{[4]byte{}} - if err := bld.AResource(hdr, ar); err != nil { - b.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err) - } - - aaar := AAAAResource{[16]byte{}} - if err := bld.AAAAResource(hdr, aaar); err != nil { - b.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err) - } - - cnr := CNAMEResource{name} - if err := bld.CNAMEResource(hdr, cnr); err != nil { - b.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err) - } - - nsr := NSResource{name} - if err := bld.NSResource(hdr, nsr); err != nil { - b.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err) - } - - if _, err := bld.Finish(); err != nil { - b.Fatal("bld.Finish():", err) - } - } -} - -func smallTestMsg() Message { - name := mustNewName("example.com.") - return Message{ - Header: Header{Response: true, Authoritative: true}, - Questions: []Question{ - { - Name: name, - Type: TypeA, - Class: ClassINET, - }, - }, - Answers: []Resource{ - { - ResourceHeader{ - Name: name, - Type: TypeA, - Class: ClassINET, - }, - &AResource{[4]byte{127, 0, 0, 1}}, - }, - }, - Authorities: []Resource{ - { - ResourceHeader{ - Name: name, - Type: TypeA, - Class: ClassINET, - }, - &AResource{[4]byte{127, 0, 0, 1}}, - }, - }, - Additionals: []Resource{ - { - ResourceHeader{ - Name: name, - Type: TypeA, - Class: ClassINET, - }, - &AResource{[4]byte{127, 0, 0, 1}}, - }, - }, - } -} - -func largeTestMsg() Message { - name := mustNewName("foo.bar.example.com.") - return Message{ - Header: Header{Response: true, Authoritative: true}, - Questions: []Question{ - { - Name: name, - Type: TypeA, - Class: ClassINET, - }, - }, - Answers: []Resource{ - { - ResourceHeader{ - Name: name, - Type: TypeA, - Class: ClassINET, - }, - &AResource{[4]byte{127, 0, 0, 1}}, - }, - { - ResourceHeader{ - Name: name, - Type: TypeA, - Class: ClassINET, - }, - &AResource{[4]byte{127, 0, 0, 2}}, - }, - { - ResourceHeader{ - Name: name, - Type: TypeAAAA, - Class: ClassINET, - }, - &AAAAResource{[16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}, - }, - { - ResourceHeader{ - Name: name, - Type: TypeCNAME, - Class: ClassINET, - }, - &CNAMEResource{mustNewName("alias.example.com.")}, - }, - { - ResourceHeader{ - Name: name, - Type: TypeSOA, - Class: ClassINET, - }, - &SOAResource{ - NS: mustNewName("ns1.example.com."), - MBox: mustNewName("mb.example.com."), - Serial: 1, - Refresh: 2, - Retry: 3, - Expire: 4, - MinTTL: 5, - }, - }, - { - ResourceHeader{ - Name: name, - Type: TypePTR, - Class: ClassINET, - }, - &PTRResource{mustNewName("ptr.example.com.")}, - }, - { - ResourceHeader{ - Name: name, - Type: TypeMX, - Class: ClassINET, - }, - &MXResource{ - 7, - 
mustNewName("mx.example.com."), - }, - }, - { - ResourceHeader{ - Name: name, - Type: TypeSRV, - Class: ClassINET, - }, - &SRVResource{ - 8, - 9, - 11, - mustNewName("srv.example.com."), - }, - }, - }, - Authorities: []Resource{ - { - ResourceHeader{ - Name: name, - Type: TypeNS, - Class: ClassINET, - }, - &NSResource{mustNewName("ns1.example.com.")}, - }, - { - ResourceHeader{ - Name: name, - Type: TypeNS, - Class: ClassINET, - }, - &NSResource{mustNewName("ns2.example.com.")}, - }, - }, - Additionals: []Resource{ - { - ResourceHeader{ - Name: name, - Type: TypeTXT, - Class: ClassINET, - }, - &TXTResource{"So Long, and Thanks for All the Fish"}, - }, - { - ResourceHeader{ - Name: name, - Type: TypeTXT, - Class: ClassINET, - }, - &TXTResource{"Hamster Huey and the Gooey Kablooie"}, - }, - }, - } -} - -const loremIpsum = ` -Lorem ipsum dolor sit amet, nec enim antiopam id, an ullum choro -nonumes qui, pro eu debet honestatis mediocritatem. No alia enim eos, -magna signiferumque ex vis. Mei no aperiri dissentias, cu vel quas -regione. Malorum quaeque vim ut, eum cu semper aliquid invidunt, ei -nam ipsum assentior. - -Nostrum appellantur usu no, vis ex probatus adipiscing. Cu usu illum -facilis eleifend. Iusto conceptam complectitur vim id. Tale omnesque -no usu, ei oblique sadipscing vim. At nullam voluptua usu, mei laudem -reformidans et. Qui ei eros porro reformidans, ius suas veritus -torquatos ex. Mea te facer alterum consequat. - -Soleat torquatos democritum sed et, no mea congue appareat, facer -aliquam nec in. Has te ipsum tritani. At justo dicta option nec, movet -phaedrum ad nam. Ea detracto verterem liberavisse has, delectus -suscipiantur in mei. Ex nam meliore complectitur. Ut nam omnis -honestatis quaerendum, ea mea nihil affert detracto, ad vix rebum -mollis. - -Ut epicurei praesent neglegentur pri, prima fuisset intellegebat ad -vim. An habemus comprehensam usu, at enim dignissim pro. Eam reque -vivendum adipisci ea. Vel ne odio choro minimum. Sea admodum -dissentiet ex. Mundi tamquam evertitur ius cu. Homero postea iisque ut -pro, vel ne saepe senserit consetetur. - -Nulla utamur facilisis ius ea, in viderer diceret pertinax eum. Mei no -enim quodsi facilisi, ex sed aeterno appareat mediocritatem, eum -sententiae deterruisset ut. At suas timeam euismod cum, offendit -appareat interpretaris ne vix. Vel ea civibus albucius, ex vim quidam -accusata intellegebat, noluisse instructior sea id. Nec te nonumes -habemus appellantur, quis dignissim vituperata eu nam. - -At vix apeirian patrioque vituperatoribus, an usu agam assum. Debet -iisque an mea. Per eu dicant ponderum accommodare. Pri alienum -placerat senserit an, ne eum ferri abhorreant vituperatoribus. Ut mea -eligendi disputationi. Ius no tation everti impedit, ei magna quidam -mediocritatem pri. - -Legendos perpetua iracundia ne usu, no ius ullum epicurei intellegam, -ad modus epicuri lucilius eam. In unum quaerendum usu. Ne diam paulo -has, ea veri virtute sed. Alia honestatis conclusionemque mea eu, ut -iudico albucius his. - -Usu essent probatus eu, sed omnis dolor delicatissimi ex. No qui augue -dissentias dissentiet. Laudem recteque no usu, vel an velit noluisse, -an sed utinam eirmod appetere. Ne mea fuisset inimicus ocurreret. At -vis dicant abhorreant, utinam forensibus nec ne, mei te docendi -consequat. Brute inermis persecuti cum id. Ut ipsum munere propriae -usu, dicit graeco disputando id has. - -Eros dolore quaerendum nam ei. Timeam ornatus inciderint pro id. 
Nec -torquatos sadipscing ei, ancillae molestie per in. Malis principes duo -ea, usu liber postulant ei. - -Graece timeam voluptatibus eu eam. Alia probatus quo no, ea scripta -feugiat duo. Congue option meliore ex qui, noster invenire appellantur -ea vel. Eu exerci legendos vel. Consetetur repudiandae vim ut. Vix an -probo minimum, et nam illud falli tempor. - -Cum dico signiferumque eu. Sed ut regione maiorum, id veritus insolens -tacimates vix. Eu mel sint tamquam lucilius, duo no oporteat -tacimates. Atqui augue concludaturque vix ei, id mel utroque menandri. - -Ad oratio blandit aliquando pro. Vis et dolorum rationibus -philosophia, ad cum nulla molestie. Hinc fuisset adversarium eum et, -ne qui nisl verear saperet, vel te quaestio forensibus. Per odio -option delenit an. Alii placerat has no, in pri nihil platonem -cotidieque. Est ut elit copiosae scaevola, debet tollit maluisset sea -an. - -Te sea hinc debet pericula, liber ridens fabulas cu sed, quem mutat -accusam mea et. Elitr labitur albucius et pri, an labore feugait mel. -Velit zril melius usu ea. Ad stet putent interpretaris qui. Mel no -error volumus scripserit. In pro paulo iudico, quo ei dolorem -verterem, affert fabellas dissentiet ea vix. - -Vis quot deserunt te. Error aliquid detraxit eu usu, vis alia eruditi -salutatus cu. Est nostrud bonorum an, ei usu alii salutatus. Vel at -nisl primis, eum ex aperiri noluisse reformidans. Ad veri velit -utroque vis, ex equidem detraxit temporibus has. - -Inermis appareat usu ne. Eros placerat periculis mea ad, in dictas -pericula pro. Errem postulant at usu, ea nec amet ornatus mentitum. Ad -mazim graeco eum, vel ex percipit volutpat iudicabit, sit ne delicata -interesset. Mel sapientem prodesset abhorreant et, oblique suscipit -eam id. - -An maluisset disputando mea, vidit mnesarchum pri et. Malis insolens -inciderint no sea. Ea persius maluisset vix, ne vim appellantur -instructior, consul quidam definiebas pri id. Cum integre feugiat -pericula in, ex sed persius similique, mel ne natum dicit percipitur. - -Primis discere ne pri, errem putent definitionem at vis. Ei mel dolore -neglegentur, mei tincidunt percipitur ei. Pro ad simul integre -rationibus. Eu vel alii honestatis definitiones, mea no nonumy -reprehendunt. - -Dicta appareat legendos est cu. Eu vel congue dicunt omittam, no vix -adhuc minimum constituam, quot noluisse id mel. Eu quot sale mutat -duo, ex nisl munere invenire duo. Ne nec ullum utamur. Pro alterum -debitis nostrum no, ut vel aliquid vivendo. - -Aliquip fierent praesent quo ne, id sit audiam recusabo delicatissimi. -Usu postulant incorrupte cu. At pro dicit tibique intellegam, cibo -dolore impedit id eam, et aeque feugait assentior has. Quando sensibus -nec ex. Possit sensibus pri ad, unum mutat periculis cu vix. - -Mundi tibique vix te, duo simul partiendo qualisque id, est at vidit -sonet tempor. No per solet aeterno deseruisse. Petentium salutandi -definiebas pri cu. Munere vivendum est in. Ei justo congue eligendi -vis, modus offendit omittantur te mel. - -Integre voluptaria in qui, sit habemus tractatos constituam no. Utinam -melius conceptam est ne, quo in minimum apeirian delicata, ut ius -porro recusabo. Dicant expetenda vix no, ludus scripserit sed ex, eu -his modo nostro. Ut etiam sonet his, quodsi inciderint philosophia te -per. Nullam lobortis eu cum, vix an sonet efficiendi repudiandae. Vis -ad idque fabellas intellegebat. - -Eum commodo senserit conclusionemque ex. 
Sed forensibus sadipscing ut, -mei in facer delicata periculis, sea ne hinc putent cetero. Nec ne -alia corpora invenire, alia prima soleat te cum. Eleifend posidonium -nam at. - -Dolorum indoctum cu quo, ex dolor legendos recteque eam, cu pri zril -discere. Nec civibus officiis dissentiunt ex, est te liber ludus -elaboraret. Cum ea fabellas invenire. Ex vim nostrud eripuit -comprehensam, nam te inermis delectus, saepe inermis senserit. -` diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go deleted file mode 100644 index cd0a8ac1..00000000 --- a/vendor/golang.org/x/net/html/atom/atom.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atom provides integer codes (also known as atoms) for a fixed set of -// frequently occurring HTML strings: tag names and attribute keys such as "p" -// and "id". -// -// Sharing an atom's name between all elements with the same tag can result in -// fewer string allocations when tokenizing and parsing HTML. Integer -// comparisons are also generally faster than string comparisons. -// -// The value of an atom's particular code is not guaranteed to stay the same -// between versions of this package. Neither is any ordering guaranteed: -// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to -// be dense. The only guarantees are that e.g. looking up "div" will yield -// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. -package atom // import "golang.org/x/net/html/atom" - -// Atom is an integer code for a string. The zero value maps to "". -type Atom uint32 - -// String returns the atom's name. -func (a Atom) String() string { - start := uint32(a >> 8) - n := uint32(a & 0xff) - if start+n > uint32(len(atomText)) { - return "" - } - return atomText[start : start+n] -} - -func (a Atom) string() string { - return atomText[a>>8 : a>>8+a&0xff] -} - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s []byte) uint32 { - for i := range s { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -func match(s string, t []byte) bool { - for i, c := range t { - if s[i] != c { - return false - } - } - return true -} - -// Lookup returns the atom whose name is s. It returns zero if there is no -// such atom. The lookup is case sensitive. -func Lookup(s []byte) Atom { - if len(s) == 0 || len(s) > maxAtomLen { - return 0 - } - h := fnv(hash0, s) - if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - return 0 -} - -// String returns a string whose contents are equal to s. In that sense, it is -// equivalent to string(s) but may be more efficient. -func String(s []byte) string { - if a := Lookup(s); a != 0 { - return a.String() - } - return string(s) -} diff --git a/vendor/golang.org/x/net/html/atom/atom_test.go b/vendor/golang.org/x/net/html/atom/atom_test.go deleted file mode 100644 index 6e33704d..00000000 --- a/vendor/golang.org/x/net/html/atom/atom_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
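The package comment in the deleted atom.go above guarantees only that looking up "div" yields atom.Div, that atom.Div.String() returns "div", and that atom.Div != 0; codes and ordering may change between versions. A minimal usage sketch against that API, assuming the golang.org/x/net/html/atom import path as vendored here (illustration only, not part of the patch):

package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	// Lookup is case sensitive and returns the zero Atom for unknown strings.
	if a := atom.Lookup([]byte("div")); a != 0 {
		fmt.Println(uint32(a), a.String()) // the numeric code is unspecified; String gives back "div"
	}
	if atom.Lookup([]byte("DIV")) == 0 {
		fmt.Println("no atom for DIV")
	}
	// String returns a string equal to its input, potentially without allocating.
	fmt.Println(atom.String([]byte("id")) == "id")
}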
- -package atom - -import ( - "sort" - "testing" -) - -func TestKnown(t *testing.T) { - for _, s := range testAtomList { - if atom := Lookup([]byte(s)); atom.String() != s { - t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String()) - } - } -} - -func TestHits(t *testing.T) { - for _, a := range table { - if a == 0 { - continue - } - got := Lookup([]byte(a.String())) - if got != a { - t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a)) - } - } -} - -func TestMisses(t *testing.T) { - testCases := []string{ - "", - "\x00", - "\xff", - "A", - "DIV", - "Div", - "dIV", - "aa", - "a\x00", - "ab", - "abb", - "abbr0", - "abbr ", - " abbr", - " a", - "acceptcharset", - "acceptCharset", - "accept_charset", - "h0", - "h1h2", - "h7", - "onClick", - "λ", - // The following string has the same hash (0xa1d7fab7) as "onmouseover". - "\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7", - } - for _, tc := range testCases { - got := Lookup([]byte(tc)) - if got != 0 { - t.Errorf("Lookup(%q): got %d, want 0", tc, got) - } - } -} - -func TestForeignObject(t *testing.T) { - const ( - afo = Foreignobject - afO = ForeignObject - sfo = "foreignobject" - sfO = "foreignObject" - ) - if got := Lookup([]byte(sfo)); got != afo { - t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo) - } - if got := Lookup([]byte(sfO)); got != afO { - t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO) - } - if got := afo.String(); got != sfo { - t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo) - } - if got := afO.String(); got != sfO { - t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO) - } -} - -func BenchmarkLookup(b *testing.B) { - sortedTable := make([]string, 0, len(table)) - for _, a := range table { - if a != 0 { - sortedTable = append(sortedTable, a.String()) - } - } - sort.Strings(sortedTable) - - x := make([][]byte, 1000) - for i := range x { - x[i] = []byte(sortedTable[i%len(sortedTable)]) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, s := range x { - Lookup(s) - } - } -} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 6bfa8660..00000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,648 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go. -// Invoke as -// -// go run gen.go |gofmt >table.go -// go run gen.go -test |gofmt >table_test.go - -import ( - "flag" - "fmt" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". -func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) 
- sort.Strings(all) - - if *test { - fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n") - fmt.Printf("package atom\n\n") - fmt.Printf("var testAtomList = []string{\n") - for _, s := range all { - fmt.Printf("\t%q,\n", s) - } - fmt.Printf("}\n") - return - } - - // uniq - lists have dups - // compute max len too - maxLen := 0 - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - if maxLen < len(s) { - maxLen = len(s) - } - all[w] = s - w++ - } - } - all = all[:w] - - // Find hash that minimizes table size. - var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) - - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } - } - if bestk > 0 { - layout[besti] += layout[bestj][bestk:] - layout[bestj] = "" - continue - } - break - } - - text := strings.Join(layout, "") - - atom := map[string]uint32{} - for _, s := range all { - off := strings.Index(text, s) - if off < 0 { - panic("lost string " + s) - } - atom[s] = uint32(off<<8 | len(s)) - } - - // Generate the Go code. - fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Printf("package atom\n\nconst (\n") - for _, s := range all { - fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s]) - } - fmt.Printf(")\n\n") - - fmt.Printf("const hash0 = %#x\n\n", best.h0) - fmt.Printf("const maxAtomLen = %d\n\n", maxLen) - - fmt.Printf("var table = [1<<%d]Atom{\n", best.k) - for i, s := range best.tab { - if s == "" { - continue - } - fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s) - } - fmt.Printf("}\n") - datasize := (1 << best.k) * 4 - - fmt.Printf("const atomText =\n") - textsize := len(text) - for len(text) > 60 { - fmt.Printf("\t%q +\n", text[:60]) - text = text[60:] - } - fmt.Printf("\t%q\n\n", text) - - fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) -} - -type byLen []string - -func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } -func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byLen) Len() int { return len(x) } - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// A table represents an attempt at constructing the lookup table. -// The lookup table uses cuckoo hashing, meaning that each string -// can be found in one of two positions. 
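The comment above explains that the generated table uses cuckoo hashing, so every atom sits in one of exactly two slots derived from a single FNV hash. An illustrative sketch of how those two candidate slots are computed, reusing gen.go's fnv and the hash0 seed and 1<<9 table size that appear in the generated table.go further down this diff (illustration only, not part of the patch):

package main

import "fmt"

// fnv mirrors the hash used by gen.go and atom.go: FNV-1 seeded with an
// arbitrary starting value h.
func fnv(h uint32, s string) uint32 {
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	return h
}

func main() {
	// hash0 and the table size 1<<9 come from the generated table.go below.
	const h0, k = 0xc17da63e, 9
	mask := uint32(1<<k - 1)
	h := fnv(h0, "div")
	h1, h2 := h&mask, (h>>16)&mask
	// Cuckoo hashing: "div" lives in one of these two slots, so Lookup
	// probes at most two table entries.
	fmt.Printf("candidate slots for %q: %#x and %#x\n", "div", h1, h2)
}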
-type table struct { - h0 uint32 - k uint - mask uint32 - tab []string -} - -// hash returns the two hashes for s. -func (t *table) hash(s string) (h1, h2 uint32) { - h := fnv(t.h0, s) - h1 = h & t.mask - h2 = (h >> 16) & t.mask - return -} - -// init initializes the table with the given parameters. -// h0 is the initial hash value, -// k is the number of bits of hash value to use, and -// x is the list of strings to store in the table. -// init returns false if the table cannot be constructed. -func (t *table) init(h0 uint32, k uint, x []string) bool { - t.h0 = h0 - t.k = k - t.tab = make([]string, 1< len(t.tab) { - return false - } - s := t.tab[i] - h1, h2 := t.hash(s) - j := h1 + h2 - i - if t.tab[j] != "" && !t.push(j, depth+1) { - return false - } - t.tab[j] = s - return true -} - -// The lists of element names and attribute keys were taken from -// https://html.spec.whatwg.org/multipage/indices.html#index -// as of the "HTML Living Standard - Last Updated 21 February 2015" version. - -var elements = []string{ - "a", - "abbr", - "address", - "area", - "article", - "aside", - "audio", - "b", - "base", - "bdi", - "bdo", - "blockquote", - "body", - "br", - "button", - "canvas", - "caption", - "cite", - "code", - "col", - "colgroup", - "command", - "data", - "datalist", - "dd", - "del", - "details", - "dfn", - "dialog", - "div", - "dl", - "dt", - "em", - "embed", - "fieldset", - "figcaption", - "figure", - "footer", - "form", - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "head", - "header", - "hgroup", - "hr", - "html", - "i", - "iframe", - "img", - "input", - "ins", - "kbd", - "keygen", - "label", - "legend", - "li", - "link", - "map", - "mark", - "menu", - "menuitem", - "meta", - "meter", - "nav", - "noscript", - "object", - "ol", - "optgroup", - "option", - "output", - "p", - "param", - "pre", - "progress", - "q", - "rp", - "rt", - "ruby", - "s", - "samp", - "script", - "section", - "select", - "small", - "source", - "span", - "strong", - "style", - "sub", - "summary", - "sup", - "table", - "tbody", - "td", - "template", - "textarea", - "tfoot", - "th", - "thead", - "time", - "title", - "tr", - "track", - "u", - "ul", - "var", - "video", - "wbr", -} - -// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 - -var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "alt", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "poster", - "preload", - "radiogroup", - "readonly", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - 
"shape", - "size", - "sizes", - "sortable", - "sorted", - "span", - "src", - "srcdoc", - "srclang", - "start", - "step", - "style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "usemap", - "value", - "width", - "wrap", -} - -var eventHandlers = []string{ - "onabort", - "onautocomplete", - "onautocompleteerror", - "onafterprint", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncuechange", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadstart", - "onmessage", - "onmousedown", - "onmousemove", - "onmouseout", - "onmouseover", - "onmouseup", - "onmousewheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onscroll", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunload", - "onvolumechange", - "onwaiting", -} - -// extra are ad-hoc values not covered by any of the lists above. -var extra = []string{ - "align", - "annotation", - "annotation-xml", - "applet", - "basefont", - "bgsound", - "big", - "blink", - "center", - "color", - "desc", - "face", - "font", - "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. 
- "foreignobject", - "frame", - "frameset", - "image", - "isindex", - "listing", - "malignmark", - "marquee", - "math", - "mglyph", - "mi", - "mn", - "mo", - "ms", - "mtext", - "nobr", - "noembed", - "noframes", - "plaintext", - "prompt", - "public", - "spacer", - "strike", - "svg", - "system", - "tt", - "xmp", -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go deleted file mode 100644 index 2605ba31..00000000 --- a/vendor/golang.org/x/net/html/atom/table.go +++ /dev/null @@ -1,713 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package atom - -const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x2106 - AcceptCharset Atom = 0x210e - Accesskey Atom = 0x3309 - Action Atom = 0x1f606 - Address Atom = 0x4f307 - Align Atom = 0x1105 - Alt Atom = 0x4503 - Annotation Atom = 0x1670a - AnnotationXml Atom = 0x1670e - Applet Atom = 0x2b306 - Area Atom = 0x2fa04 - Article Atom = 0x38807 - Aside Atom = 0x8305 - Async Atom = 0x7b05 - Audio Atom = 0xa605 - Autocomplete Atom = 0x1fc0c - Autofocus Atom = 0xb309 - Autoplay Atom = 0xce08 - B Atom = 0x101 - Base Atom = 0xd604 - Basefont Atom = 0xd608 - Bdi Atom = 0x1a03 - Bdo Atom = 0xe703 - Bgsound Atom = 0x11807 - Big Atom = 0x12403 - Blink Atom = 0x12705 - Blockquote Atom = 0x12c0a - Body Atom = 0x2f04 - Br Atom = 0x202 - Button Atom = 0x13606 - Canvas Atom = 0x7f06 - Caption Atom = 0x1bb07 - Center Atom = 0x5b506 - Challenge Atom = 0x21f09 - Charset Atom = 0x2807 - Checked Atom = 0x32807 - Cite Atom = 0x3c804 - Class Atom = 0x4de05 - Code Atom = 0x14904 - Col Atom = 0x15003 - Colgroup Atom = 0x15008 - Color Atom = 0x15d05 - Cols Atom = 0x16204 - Colspan Atom = 0x16207 - Command Atom = 0x17507 - Content Atom = 0x42307 - Contenteditable Atom = 0x4230f - Contextmenu Atom = 0x3310b - Controls Atom = 0x18808 - Coords Atom = 0x19406 - Crossorigin Atom = 0x19f0b - Data Atom = 0x44a04 - Datalist Atom = 0x44a08 - Datetime Atom = 0x23c08 - Dd Atom = 0x26702 - Default Atom = 0x8607 - Defer Atom = 0x14b05 - Del Atom = 0x3ef03 - Desc Atom = 0x4db04 - Details Atom = 0x4807 - Dfn Atom = 0x6103 - Dialog Atom = 0x1b06 - Dir Atom = 0x6903 - Dirname Atom = 0x6907 - Disabled Atom = 0x10c08 - Div Atom = 0x11303 - Dl Atom = 0x11e02 - Download Atom = 0x40008 - Draggable Atom = 0x17b09 - Dropzone Atom = 0x39108 - Dt Atom = 0x50902 - Em Atom = 0x6502 - Embed Atom = 0x6505 - Enctype Atom = 0x21107 - Face Atom = 0x5b304 - Fieldset Atom = 0x1b008 - Figcaption Atom = 0x1b80a - Figure Atom = 0x1cc06 - Font Atom = 0xda04 - Footer Atom = 0x8d06 - For Atom = 0x1d803 - ForeignObject Atom = 0x1d80d - Foreignobject Atom = 0x1e50d - Form Atom = 0x1f204 - Formaction Atom = 0x1f20a - Formenctype Atom = 0x20d0b - Formmethod Atom = 0x2280a - Formnovalidate Atom = 0x2320e - Formtarget Atom = 0x2470a - Frame Atom = 0x9a05 - Frameset Atom = 0x9a08 - H1 Atom = 0x26e02 - H2 Atom = 0x29402 - H3 Atom = 0x2a702 - H4 Atom = 0x2e902 - H5 Atom = 0x2f302 - H6 Atom = 0x50b02 - Head Atom = 0x2d504 - Header Atom = 0x2d506 - Headers Atom = 0x2d507 - Height Atom = 0x25106 - Hgroup Atom = 0x25906 - Hidden Atom = 0x26506 - High Atom = 0x26b04 - Hr Atom = 0x27002 - Href Atom = 0x27004 - Hreflang Atom = 0x27008 - Html Atom = 0x25504 - HttpEquiv Atom = 0x2780a - I Atom = 0x601 - Icon Atom = 0x42204 - Id Atom = 0x8502 - Iframe Atom = 0x29606 - Image Atom = 0x29c05 - Img Atom = 0x2a103 - Input Atom = 0x3e805 - Inputmode Atom = 0x3e809 - Ins Atom = 0x1a803 - Isindex Atom = 0x2a907 - Ismap Atom = 0x2b005 - Itemid Atom = 0x33c06 - Itemprop Atom = 0x3c908 - 
Itemref Atom = 0x5ad07 - Itemscope Atom = 0x2b909 - Itemtype Atom = 0x2c308 - Kbd Atom = 0x1903 - Keygen Atom = 0x3906 - Keytype Atom = 0x53707 - Kind Atom = 0x10904 - Label Atom = 0xf005 - Lang Atom = 0x27404 - Legend Atom = 0x18206 - Li Atom = 0x1202 - Link Atom = 0x12804 - List Atom = 0x44e04 - Listing Atom = 0x44e07 - Loop Atom = 0xf404 - Low Atom = 0x11f03 - Malignmark Atom = 0x100a - Manifest Atom = 0x5f108 - Map Atom = 0x2b203 - Mark Atom = 0x1604 - Marquee Atom = 0x2cb07 - Math Atom = 0x2d204 - Max Atom = 0x2e103 - Maxlength Atom = 0x2e109 - Media Atom = 0x6e05 - Mediagroup Atom = 0x6e0a - Menu Atom = 0x33804 - Menuitem Atom = 0x33808 - Meta Atom = 0x45d04 - Meter Atom = 0x24205 - Method Atom = 0x22c06 - Mglyph Atom = 0x2a206 - Mi Atom = 0x2eb02 - Min Atom = 0x2eb03 - Minlength Atom = 0x2eb09 - Mn Atom = 0x23502 - Mo Atom = 0x3ed02 - Ms Atom = 0x2bc02 - Mtext Atom = 0x2f505 - Multiple Atom = 0x30308 - Muted Atom = 0x30b05 - Name Atom = 0x6c04 - Nav Atom = 0x3e03 - Nobr Atom = 0x5704 - Noembed Atom = 0x6307 - Noframes Atom = 0x9808 - Noscript Atom = 0x3d208 - Novalidate Atom = 0x2360a - Object Atom = 0x1ec06 - Ol Atom = 0xc902 - Onabort Atom = 0x13a07 - Onafterprint Atom = 0x1c00c - Onautocomplete Atom = 0x1fa0e - Onautocompleteerror Atom = 0x1fa13 - Onbeforeprint Atom = 0x6040d - Onbeforeunload Atom = 0x4e70e - Onblur Atom = 0xaa06 - Oncancel Atom = 0xe908 - Oncanplay Atom = 0x28509 - Oncanplaythrough Atom = 0x28510 - Onchange Atom = 0x3a708 - Onclick Atom = 0x31007 - Onclose Atom = 0x31707 - Oncontextmenu Atom = 0x32f0d - Oncuechange Atom = 0x3420b - Ondblclick Atom = 0x34d0a - Ondrag Atom = 0x35706 - Ondragend Atom = 0x35709 - Ondragenter Atom = 0x3600b - Ondragleave Atom = 0x36b0b - Ondragover Atom = 0x3760a - Ondragstart Atom = 0x3800b - Ondrop Atom = 0x38f06 - Ondurationchange Atom = 0x39f10 - Onemptied Atom = 0x39609 - Onended Atom = 0x3af07 - Onerror Atom = 0x3b607 - Onfocus Atom = 0x3bd07 - Onhashchange Atom = 0x3da0c - Oninput Atom = 0x3e607 - Oninvalid Atom = 0x3f209 - Onkeydown Atom = 0x3fb09 - Onkeypress Atom = 0x4080a - Onkeyup Atom = 0x41807 - Onlanguagechange Atom = 0x43210 - Onload Atom = 0x44206 - Onloadeddata Atom = 0x4420c - Onloadedmetadata Atom = 0x45510 - Onloadstart Atom = 0x46b0b - Onmessage Atom = 0x47609 - Onmousedown Atom = 0x47f0b - Onmousemove Atom = 0x48a0b - Onmouseout Atom = 0x4950a - Onmouseover Atom = 0x4a20b - Onmouseup Atom = 0x4ad09 - Onmousewheel Atom = 0x4b60c - Onoffline Atom = 0x4c209 - Ononline Atom = 0x4cb08 - Onpagehide Atom = 0x4d30a - Onpageshow Atom = 0x4fe0a - Onpause Atom = 0x50d07 - Onplay Atom = 0x51706 - Onplaying Atom = 0x51709 - Onpopstate Atom = 0x5200a - Onprogress Atom = 0x52a0a - Onratechange Atom = 0x53e0c - Onreset Atom = 0x54a07 - Onresize Atom = 0x55108 - Onscroll Atom = 0x55f08 - Onseeked Atom = 0x56708 - Onseeking Atom = 0x56f09 - Onselect Atom = 0x57808 - Onshow Atom = 0x58206 - Onsort Atom = 0x58b06 - Onstalled Atom = 0x59509 - Onstorage Atom = 0x59e09 - Onsubmit Atom = 0x5a708 - Onsuspend Atom = 0x5bb09 - Ontimeupdate Atom = 0xdb0c - Ontoggle Atom = 0x5c408 - Onunload Atom = 0x5cc08 - Onvolumechange Atom = 0x5d40e - Onwaiting Atom = 0x5e209 - Open Atom = 0x3cf04 - Optgroup Atom = 0xf608 - Optimum Atom = 0x5eb07 - Option Atom = 0x60006 - Output Atom = 0x49c06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x5107 - Ping Atom = 0x7704 - Placeholder Atom = 0xc30b - Plaintext Atom = 0xfd09 - Poster Atom = 0x15706 - Pre Atom = 0x25e03 - Preload Atom = 0x25e07 - Progress Atom = 0x52c08 - Prompt Atom = 0x5fa06 - 
Public Atom = 0x41e06 - Q Atom = 0x13101 - Radiogroup Atom = 0x30a - Readonly Atom = 0x2fb08 - Rel Atom = 0x25f03 - Required Atom = 0x1d008 - Reversed Atom = 0x5a08 - Rows Atom = 0x9204 - Rowspan Atom = 0x9207 - Rp Atom = 0x1c602 - Rt Atom = 0x13f02 - Ruby Atom = 0xaf04 - S Atom = 0x2c01 - Samp Atom = 0x4e04 - Sandbox Atom = 0xbb07 - Scope Atom = 0x2bd05 - Scoped Atom = 0x2bd06 - Script Atom = 0x3d406 - Seamless Atom = 0x31c08 - Section Atom = 0x4e207 - Select Atom = 0x57a06 - Selected Atom = 0x57a08 - Shape Atom = 0x4f905 - Size Atom = 0x55504 - Sizes Atom = 0x55505 - Small Atom = 0x18f05 - Sortable Atom = 0x58d08 - Sorted Atom = 0x19906 - Source Atom = 0x1aa06 - Spacer Atom = 0x2db06 - Span Atom = 0x9504 - Spellcheck Atom = 0x3230a - Src Atom = 0x3c303 - Srcdoc Atom = 0x3c306 - Srclang Atom = 0x41107 - Start Atom = 0x38605 - Step Atom = 0x5f704 - Strike Atom = 0x53306 - Strong Atom = 0x55906 - Style Atom = 0x61105 - Sub Atom = 0x5a903 - Summary Atom = 0x61607 - Sup Atom = 0x61d03 - Svg Atom = 0x62003 - System Atom = 0x62306 - Tabindex Atom = 0x46308 - Table Atom = 0x42d05 - Target Atom = 0x24b06 - Tbody Atom = 0x2e05 - Td Atom = 0x4702 - Template Atom = 0x62608 - Textarea Atom = 0x2f608 - Tfoot Atom = 0x8c05 - Th Atom = 0x22e02 - Thead Atom = 0x2d405 - Time Atom = 0xdd04 - Title Atom = 0xa105 - Tr Atom = 0x10502 - Track Atom = 0x10505 - Translate Atom = 0x14009 - Tt Atom = 0x5302 - Type Atom = 0x21404 - Typemustmatch Atom = 0x2140d - U Atom = 0xb01 - Ul Atom = 0x8a02 - Usemap Atom = 0x51106 - Value Atom = 0x4005 - Var Atom = 0x11503 - Video Atom = 0x28105 - Wbr Atom = 0x12103 - Width Atom = 0x50705 - Wrap Atom = 0x58704 - Xmp Atom = 0xc103 -) - -const hash0 = 0xc17da63e - -const maxAtomLen = 19 - -var table = [1 << 9]Atom{ - 0x1: 0x48a0b, // onmousemove - 0x2: 0x5e209, // onwaiting - 0x3: 0x1fa13, // onautocompleteerror - 0x4: 0x5fa06, // prompt - 0x7: 0x5eb07, // optimum - 0x8: 0x1604, // mark - 0xa: 0x5ad07, // itemref - 0xb: 0x4fe0a, // onpageshow - 0xc: 0x57a06, // select - 0xd: 0x17b09, // draggable - 0xe: 0x3e03, // nav - 0xf: 0x17507, // command - 0x11: 0xb01, // u - 0x14: 0x2d507, // headers - 0x15: 0x44a08, // datalist - 0x17: 0x4e04, // samp - 0x1a: 0x3fb09, // onkeydown - 0x1b: 0x55f08, // onscroll - 0x1c: 0x15003, // col - 0x20: 0x3c908, // itemprop - 0x21: 0x2780a, // http-equiv - 0x22: 0x61d03, // sup - 0x24: 0x1d008, // required - 0x2b: 0x25e07, // preload - 0x2c: 0x6040d, // onbeforeprint - 0x2d: 0x3600b, // ondragenter - 0x2e: 0x50902, // dt - 0x2f: 0x5a708, // onsubmit - 0x30: 0x27002, // hr - 0x31: 0x32f0d, // oncontextmenu - 0x33: 0x29c05, // image - 0x34: 0x50d07, // onpause - 0x35: 0x25906, // hgroup - 0x36: 0x7704, // ping - 0x37: 0x57808, // onselect - 0x3a: 0x11303, // div - 0x3b: 0x1fa0e, // onautocomplete - 0x40: 0x2eb02, // mi - 0x41: 0x31c08, // seamless - 0x42: 0x2807, // charset - 0x43: 0x8502, // id - 0x44: 0x5200a, // onpopstate - 0x45: 0x3ef03, // del - 0x46: 0x2cb07, // marquee - 0x47: 0x3309, // accesskey - 0x49: 0x8d06, // footer - 0x4a: 0x44e04, // list - 0x4b: 0x2b005, // ismap - 0x51: 0x33804, // menu - 0x52: 0x2f04, // body - 0x55: 0x9a08, // frameset - 0x56: 0x54a07, // onreset - 0x57: 0x12705, // blink - 0x58: 0xa105, // title - 0x59: 0x38807, // article - 0x5b: 0x22e02, // th - 0x5d: 0x13101, // q - 0x5e: 0x3cf04, // open - 0x5f: 0x2fa04, // area - 0x61: 0x44206, // onload - 0x62: 0xda04, // font - 0x63: 0xd604, // base - 0x64: 0x16207, // colspan - 0x65: 0x53707, // keytype - 0x66: 0x11e02, // dl - 0x68: 0x1b008, // fieldset - 0x6a: 0x2eb03, 
// min - 0x6b: 0x11503, // var - 0x6f: 0x2d506, // header - 0x70: 0x13f02, // rt - 0x71: 0x15008, // colgroup - 0x72: 0x23502, // mn - 0x74: 0x13a07, // onabort - 0x75: 0x3906, // keygen - 0x76: 0x4c209, // onoffline - 0x77: 0x21f09, // challenge - 0x78: 0x2b203, // map - 0x7a: 0x2e902, // h4 - 0x7b: 0x3b607, // onerror - 0x7c: 0x2e109, // maxlength - 0x7d: 0x2f505, // mtext - 0x7e: 0xbb07, // sandbox - 0x7f: 0x58b06, // onsort - 0x80: 0x100a, // malignmark - 0x81: 0x45d04, // meta - 0x82: 0x7b05, // async - 0x83: 0x2a702, // h3 - 0x84: 0x26702, // dd - 0x85: 0x27004, // href - 0x86: 0x6e0a, // mediagroup - 0x87: 0x19406, // coords - 0x88: 0x41107, // srclang - 0x89: 0x34d0a, // ondblclick - 0x8a: 0x4005, // value - 0x8c: 0xe908, // oncancel - 0x8e: 0x3230a, // spellcheck - 0x8f: 0x9a05, // frame - 0x91: 0x12403, // big - 0x94: 0x1f606, // action - 0x95: 0x6903, // dir - 0x97: 0x2fb08, // readonly - 0x99: 0x42d05, // table - 0x9a: 0x61607, // summary - 0x9b: 0x12103, // wbr - 0x9c: 0x30a, // radiogroup - 0x9d: 0x6c04, // name - 0x9f: 0x62306, // system - 0xa1: 0x15d05, // color - 0xa2: 0x7f06, // canvas - 0xa3: 0x25504, // html - 0xa5: 0x56f09, // onseeking - 0xac: 0x4f905, // shape - 0xad: 0x25f03, // rel - 0xae: 0x28510, // oncanplaythrough - 0xaf: 0x3760a, // ondragover - 0xb0: 0x62608, // template - 0xb1: 0x1d80d, // foreignObject - 0xb3: 0x9204, // rows - 0xb6: 0x44e07, // listing - 0xb7: 0x49c06, // output - 0xb9: 0x3310b, // contextmenu - 0xbb: 0x11f03, // low - 0xbc: 0x1c602, // rp - 0xbd: 0x5bb09, // onsuspend - 0xbe: 0x13606, // button - 0xbf: 0x4db04, // desc - 0xc1: 0x4e207, // section - 0xc2: 0x52a0a, // onprogress - 0xc3: 0x59e09, // onstorage - 0xc4: 0x2d204, // math - 0xc5: 0x4503, // alt - 0xc7: 0x8a02, // ul - 0xc8: 0x5107, // pattern - 0xc9: 0x4b60c, // onmousewheel - 0xca: 0x35709, // ondragend - 0xcb: 0xaf04, // ruby - 0xcc: 0xc01, // p - 0xcd: 0x31707, // onclose - 0xce: 0x24205, // meter - 0xcf: 0x11807, // bgsound - 0xd2: 0x25106, // height - 0xd4: 0x101, // b - 0xd5: 0x2c308, // itemtype - 0xd8: 0x1bb07, // caption - 0xd9: 0x10c08, // disabled - 0xdb: 0x33808, // menuitem - 0xdc: 0x62003, // svg - 0xdd: 0x18f05, // small - 0xde: 0x44a04, // data - 0xe0: 0x4cb08, // ononline - 0xe1: 0x2a206, // mglyph - 0xe3: 0x6505, // embed - 0xe4: 0x10502, // tr - 0xe5: 0x46b0b, // onloadstart - 0xe7: 0x3c306, // srcdoc - 0xeb: 0x5c408, // ontoggle - 0xed: 0xe703, // bdo - 0xee: 0x4702, // td - 0xef: 0x8305, // aside - 0xf0: 0x29402, // h2 - 0xf1: 0x52c08, // progress - 0xf2: 0x12c0a, // blockquote - 0xf4: 0xf005, // label - 0xf5: 0x601, // i - 0xf7: 0x9207, // rowspan - 0xfb: 0x51709, // onplaying - 0xfd: 0x2a103, // img - 0xfe: 0xf608, // optgroup - 0xff: 0x42307, // content - 0x101: 0x53e0c, // onratechange - 0x103: 0x3da0c, // onhashchange - 0x104: 0x4807, // details - 0x106: 0x40008, // download - 0x109: 0x14009, // translate - 0x10b: 0x4230f, // contenteditable - 0x10d: 0x36b0b, // ondragleave - 0x10e: 0x2106, // accept - 0x10f: 0x57a08, // selected - 0x112: 0x1f20a, // formaction - 0x113: 0x5b506, // center - 0x115: 0x45510, // onloadedmetadata - 0x116: 0x12804, // link - 0x117: 0xdd04, // time - 0x118: 0x19f0b, // crossorigin - 0x119: 0x3bd07, // onfocus - 0x11a: 0x58704, // wrap - 0x11b: 0x42204, // icon - 0x11d: 0x28105, // video - 0x11e: 0x4de05, // class - 0x121: 0x5d40e, // onvolumechange - 0x122: 0xaa06, // onblur - 0x123: 0x2b909, // itemscope - 0x124: 0x61105, // style - 0x127: 0x41e06, // public - 0x129: 0x2320e, // formnovalidate - 0x12a: 0x58206, // onshow - 
0x12c: 0x51706, // onplay - 0x12d: 0x3c804, // cite - 0x12e: 0x2bc02, // ms - 0x12f: 0xdb0c, // ontimeupdate - 0x130: 0x10904, // kind - 0x131: 0x2470a, // formtarget - 0x135: 0x3af07, // onended - 0x136: 0x26506, // hidden - 0x137: 0x2c01, // s - 0x139: 0x2280a, // formmethod - 0x13a: 0x3e805, // input - 0x13c: 0x50b02, // h6 - 0x13d: 0xc902, // ol - 0x13e: 0x3420b, // oncuechange - 0x13f: 0x1e50d, // foreignobject - 0x143: 0x4e70e, // onbeforeunload - 0x144: 0x2bd05, // scope - 0x145: 0x39609, // onemptied - 0x146: 0x14b05, // defer - 0x147: 0xc103, // xmp - 0x148: 0x39f10, // ondurationchange - 0x149: 0x1903, // kbd - 0x14c: 0x47609, // onmessage - 0x14d: 0x60006, // option - 0x14e: 0x2eb09, // minlength - 0x14f: 0x32807, // checked - 0x150: 0xce08, // autoplay - 0x152: 0x202, // br - 0x153: 0x2360a, // novalidate - 0x156: 0x6307, // noembed - 0x159: 0x31007, // onclick - 0x15a: 0x47f0b, // onmousedown - 0x15b: 0x3a708, // onchange - 0x15e: 0x3f209, // oninvalid - 0x15f: 0x2bd06, // scoped - 0x160: 0x18808, // controls - 0x161: 0x30b05, // muted - 0x162: 0x58d08, // sortable - 0x163: 0x51106, // usemap - 0x164: 0x1b80a, // figcaption - 0x165: 0x35706, // ondrag - 0x166: 0x26b04, // high - 0x168: 0x3c303, // src - 0x169: 0x15706, // poster - 0x16b: 0x1670e, // annotation-xml - 0x16c: 0x5f704, // step - 0x16d: 0x4, // abbr - 0x16e: 0x1b06, // dialog - 0x170: 0x1202, // li - 0x172: 0x3ed02, // mo - 0x175: 0x1d803, // for - 0x176: 0x1a803, // ins - 0x178: 0x55504, // size - 0x179: 0x43210, // onlanguagechange - 0x17a: 0x8607, // default - 0x17b: 0x1a03, // bdi - 0x17c: 0x4d30a, // onpagehide - 0x17d: 0x6907, // dirname - 0x17e: 0x21404, // type - 0x17f: 0x1f204, // form - 0x181: 0x28509, // oncanplay - 0x182: 0x6103, // dfn - 0x183: 0x46308, // tabindex - 0x186: 0x6502, // em - 0x187: 0x27404, // lang - 0x189: 0x39108, // dropzone - 0x18a: 0x4080a, // onkeypress - 0x18b: 0x23c08, // datetime - 0x18c: 0x16204, // cols - 0x18d: 0x1, // a - 0x18e: 0x4420c, // onloadeddata - 0x190: 0xa605, // audio - 0x192: 0x2e05, // tbody - 0x193: 0x22c06, // method - 0x195: 0xf404, // loop - 0x196: 0x29606, // iframe - 0x198: 0x2d504, // head - 0x19e: 0x5f108, // manifest - 0x19f: 0xb309, // autofocus - 0x1a0: 0x14904, // code - 0x1a1: 0x55906, // strong - 0x1a2: 0x30308, // multiple - 0x1a3: 0xc05, // param - 0x1a6: 0x21107, // enctype - 0x1a7: 0x5b304, // face - 0x1a8: 0xfd09, // plaintext - 0x1a9: 0x26e02, // h1 - 0x1aa: 0x59509, // onstalled - 0x1ad: 0x3d406, // script - 0x1ae: 0x2db06, // spacer - 0x1af: 0x55108, // onresize - 0x1b0: 0x4a20b, // onmouseover - 0x1b1: 0x5cc08, // onunload - 0x1b2: 0x56708, // onseeked - 0x1b4: 0x2140d, // typemustmatch - 0x1b5: 0x1cc06, // figure - 0x1b6: 0x4950a, // onmouseout - 0x1b7: 0x25e03, // pre - 0x1b8: 0x50705, // width - 0x1b9: 0x19906, // sorted - 0x1bb: 0x5704, // nobr - 0x1be: 0x5302, // tt - 0x1bf: 0x1105, // align - 0x1c0: 0x3e607, // oninput - 0x1c3: 0x41807, // onkeyup - 0x1c6: 0x1c00c, // onafterprint - 0x1c7: 0x210e, // accept-charset - 0x1c8: 0x33c06, // itemid - 0x1c9: 0x3e809, // inputmode - 0x1cb: 0x53306, // strike - 0x1cc: 0x5a903, // sub - 0x1cd: 0x10505, // track - 0x1ce: 0x38605, // start - 0x1d0: 0xd608, // basefont - 0x1d6: 0x1aa06, // source - 0x1d7: 0x18206, // legend - 0x1d8: 0x2d405, // thead - 0x1da: 0x8c05, // tfoot - 0x1dd: 0x1ec06, // object - 0x1de: 0x6e05, // media - 0x1df: 0x1670a, // annotation - 0x1e0: 0x20d0b, // formenctype - 0x1e2: 0x3d208, // noscript - 0x1e4: 0x55505, // sizes - 0x1e5: 0x1fc0c, // autocomplete - 0x1e6: 
0x9504, // span - 0x1e7: 0x9808, // noframes - 0x1e8: 0x24b06, // target - 0x1e9: 0x38f06, // ondrop - 0x1ea: 0x2b306, // applet - 0x1ec: 0x5a08, // reversed - 0x1f0: 0x2a907, // isindex - 0x1f3: 0x27008, // hreflang - 0x1f5: 0x2f302, // h5 - 0x1f6: 0x4f307, // address - 0x1fa: 0x2e103, // max - 0x1fb: 0xc30b, // placeholder - 0x1fc: 0x2f608, // textarea - 0x1fe: 0x4ad09, // onmouseup - 0x1ff: 0x3800b, // ondragstart -} - -const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" + - "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" + - "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" + - "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" + - "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" + - "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" + - "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" + - "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" + - "bjectforeignobjectformactionautocompleteerrorformenctypemust" + - "matchallengeformmethodformnovalidatetimeterformtargetheightm" + - "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" + - "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" + - "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" + - "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" + - "hangeondblclickondragendondragenterondragleaveondragoverondr" + - "agstarticleondropzonemptiedondurationchangeonendedonerroronf" + - "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" + - "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" + - "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" + - "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" + - "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" + - "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" + - "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" + - "ollonseekedonseekingonselectedonshowraponsortableonstalledon" + - "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" + - "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" + - "mmarysupsvgsystemplate" diff --git a/vendor/golang.org/x/net/html/atom/table_test.go b/vendor/golang.org/x/net/html/atom/table_test.go deleted file mode 100644 index 0f2ecce4..00000000 --- a/vendor/golang.org/x/net/html/atom/table_test.go +++ /dev/null @@ -1,351 +0,0 @@ -// generated by go run gen.go -test; DO NOT EDIT - -package atom - -var testAtomList = []string{ - "a", - "abbr", - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "address", - "align", - "alt", - "annotation", - "annotation-xml", - "applet", - "area", - "article", - "aside", - "async", - "audio", - "autocomplete", - "autofocus", - "autoplay", - "b", - "base", - "basefont", - "bdi", - "bdo", - "bgsound", - "big", - "blink", - "blockquote", - "body", - "br", - "button", - "canvas", - "caption", - "center", - "challenge", - "charset", - "checked", - "cite", - "cite", - "class", - "code", - "col", - "colgroup", - "color", - "cols", - "colspan", - "command", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "data", - "datalist", - "datetime", - "dd", - "default", - "defer", - "del", - "desc", - "details", - "dfn", - "dialog", - "dir", - "dirname", - "disabled", - "div", - "dl", - "download", - "draggable", - "dropzone", - "dt", - "em", - "embed", - "enctype", - "face", - "fieldset", - 
"figcaption", - "figure", - "font", - "footer", - "for", - "foreignObject", - "foreignobject", - "form", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "frame", - "frameset", - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "head", - "header", - "headers", - "height", - "hgroup", - "hidden", - "high", - "hr", - "href", - "hreflang", - "html", - "http-equiv", - "i", - "icon", - "id", - "iframe", - "image", - "img", - "input", - "inputmode", - "ins", - "isindex", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "kbd", - "keygen", - "keytype", - "kind", - "label", - "label", - "lang", - "legend", - "li", - "link", - "list", - "listing", - "loop", - "low", - "malignmark", - "manifest", - "map", - "mark", - "marquee", - "math", - "max", - "maxlength", - "media", - "mediagroup", - "menu", - "menuitem", - "meta", - "meter", - "method", - "mglyph", - "mi", - "min", - "minlength", - "mn", - "mo", - "ms", - "mtext", - "multiple", - "muted", - "name", - "nav", - "nobr", - "noembed", - "noframes", - "noscript", - "novalidate", - "object", - "ol", - "onabort", - "onafterprint", - "onautocomplete", - "onautocompleteerror", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncuechange", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadstart", - "onmessage", - "onmousedown", - "onmousemove", - "onmouseout", - "onmouseover", - "onmouseup", - "onmousewheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onscroll", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunload", - "onvolumechange", - "onwaiting", - "open", - "optgroup", - "optimum", - "option", - "output", - "p", - "param", - "pattern", - "ping", - "placeholder", - "plaintext", - "poster", - "pre", - "preload", - "progress", - "prompt", - "public", - "q", - "radiogroup", - "readonly", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "rp", - "rt", - "ruby", - "s", - "samp", - "sandbox", - "scope", - "scoped", - "script", - "seamless", - "section", - "select", - "selected", - "shape", - "size", - "sizes", - "small", - "sortable", - "sorted", - "source", - "spacer", - "span", - "span", - "spellcheck", - "src", - "srcdoc", - "srclang", - "start", - "step", - "strike", - "strong", - "style", - "style", - "sub", - "summary", - "sup", - "svg", - "system", - "tabindex", - "table", - "target", - "tbody", - "td", - "template", - "textarea", - "tfoot", - "th", - "thead", - "time", - "title", - "title", - "tr", - "track", - "translate", - "tt", - "type", - "typemustmatch", - "u", - "ul", - "usemap", - "value", - "var", - "video", - "wbr", - "width", - "wrap", - "xmp", -} diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go deleted file mode 100644 index 13bed159..00000000 --- a/vendor/golang.org/x/net/html/charset/charset.go +++ 
/dev/null @@ -1,257 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package charset provides common text encodings for HTML documents. -// -// The mapping from encoding labels to encodings is defined at -// https://encoding.spec.whatwg.org/. -package charset // import "golang.org/x/net/html/charset" - -import ( - "bytes" - "fmt" - "io" - "mime" - "strings" - "unicode/utf8" - - "golang.org/x/net/html" - "golang.org/x/text/encoding" - "golang.org/x/text/encoding/charmap" - "golang.org/x/text/encoding/htmlindex" - "golang.org/x/text/transform" -) - -// Lookup returns the encoding with the specified label, and its canonical -// name. It returns nil and the empty string if label is not one of the -// standard encodings for HTML. Matching is case-insensitive and ignores -// leading and trailing whitespace. Encoders will use HTML escape sequences for -// runes that are not supported by the character set. -func Lookup(label string) (e encoding.Encoding, name string) { - e, err := htmlindex.Get(label) - if err != nil { - return nil, "" - } - name, _ = htmlindex.Name(e) - return &htmlEncoding{e}, name -} - -type htmlEncoding struct{ encoding.Encoding } - -func (h *htmlEncoding) NewEncoder() *encoding.Encoder { - // HTML requires a non-terminating legacy encoder. We use HTML escapes to - // substitute unsupported code points. - return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) -} - -// DetermineEncoding determines the encoding of an HTML document by examining -// up to the first 1024 bytes of content and the declared Content-Type. -// -// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding -func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { - if len(content) > 1024 { - content = content[:1024] - } - - for _, b := range boms { - if bytes.HasPrefix(content, b.bom) { - e, name = Lookup(b.enc) - return e, name, true - } - } - - if _, params, err := mime.ParseMediaType(contentType); err == nil { - if cs, ok := params["charset"]; ok { - if e, name = Lookup(cs); e != nil { - return e, name, true - } - } - } - - if len(content) > 0 { - e, name = prescan(content) - if e != nil { - return e, name, false - } - } - - // Try to detect UTF-8. - // First eliminate any partial rune at the end. - for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { - b := content[i] - if b < 0x80 { - break - } - if utf8.RuneStart(b) { - content = content[:i] - break - } - } - hasHighBit := false - for _, c := range content { - if c >= 0x80 { - hasHighBit = true - break - } - } - if hasHighBit && utf8.Valid(content) { - return encoding.Nop, "utf-8", false - } - - // TODO: change default depending on user's locale? - return charmap.Windows1252, "windows-1252", false -} - -// NewReader returns an io.Reader that converts the content of r to UTF-8. -// It calls DetermineEncoding to find out what r's encoding is. 
-func NewReader(r io.Reader, contentType string) (io.Reader, error) { - preview := make([]byte, 1024) - n, err := io.ReadFull(r, preview) - switch { - case err == io.ErrUnexpectedEOF: - preview = preview[:n] - r = bytes.NewReader(preview) - case err != nil: - return nil, err - default: - r = io.MultiReader(bytes.NewReader(preview), r) - } - - if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { - r = transform.NewReader(r, e.NewDecoder()) - } - return r, nil -} - -// NewReaderLabel returns a reader that converts from the specified charset to -// UTF-8. It uses Lookup to find the encoding that corresponds to label, and -// returns an error if Lookup returns nil. It is suitable for use as -// encoding/xml.Decoder's CharsetReader function. -func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { - e, _ := Lookup(label) - if e == nil { - return nil, fmt.Errorf("unsupported charset: %q", label) - } - return transform.NewReader(input, e.NewDecoder()), nil -} - -func prescan(content []byte) (e encoding.Encoding, name string) { - z := html.NewTokenizer(bytes.NewReader(content)) - for { - switch z.Next() { - case html.ErrorToken: - return nil, "" - - case html.StartTagToken, html.SelfClosingTagToken: - tagName, hasAttr := z.TagName() - if !bytes.Equal(tagName, []byte("meta")) { - continue - } - attrList := make(map[string]bool) - gotPragma := false - - const ( - dontKnow = iota - doNeedPragma - doNotNeedPragma - ) - needPragma := dontKnow - - name = "" - e = nil - for hasAttr { - var key, val []byte - key, val, hasAttr = z.TagAttr() - ks := string(key) - if attrList[ks] { - continue - } - attrList[ks] = true - for i, c := range val { - if 'A' <= c && c <= 'Z' { - val[i] = c + 0x20 - } - } - - switch ks { - case "http-equiv": - if bytes.Equal(val, []byte("content-type")) { - gotPragma = true - } - - case "content": - if e == nil { - name = fromMetaElement(string(val)) - if name != "" { - e, name = Lookup(name) - if e != nil { - needPragma = doNeedPragma - } - } - } - - case "charset": - e, name = Lookup(string(val)) - needPragma = doNotNeedPragma - } - } - - if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { - continue - } - - if strings.HasPrefix(name, "utf-16") { - name = "utf-8" - e = encoding.Nop - } - - if e != nil { - return e, name - } - } - } -} - -func fromMetaElement(s string) string { - for s != "" { - csLoc := strings.Index(s, "charset") - if csLoc == -1 { - return "" - } - s = s[csLoc+len("charset"):] - s = strings.TrimLeft(s, " \t\n\f\r") - if !strings.HasPrefix(s, "=") { - continue - } - s = s[1:] - s = strings.TrimLeft(s, " \t\n\f\r") - if s == "" { - return "" - } - if q := s[0]; q == '"' || q == '\'' { - s = s[1:] - closeQuote := strings.IndexRune(s, rune(q)) - if closeQuote == -1 { - return "" - } - return s[:closeQuote] - } - - end := strings.IndexAny(s, "; \t\n\f\r") - if end == -1 { - end = len(s) - } - return s[:end] - } - return "" -} - -var boms = []struct { - bom []byte - enc string -}{ - {[]byte{0xfe, 0xff}, "utf-16be"}, - {[]byte{0xff, 0xfe}, "utf-16le"}, - {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, -} diff --git a/vendor/golang.org/x/net/html/charset/charset_test.go b/vendor/golang.org/x/net/html/charset/charset_test.go deleted file mode 100644 index e4e7d86b..00000000 --- a/vendor/golang.org/x/net/html/charset/charset_test.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package charset - -import ( - "bytes" - "encoding/xml" - "io/ioutil" - "runtime" - "strings" - "testing" - - "golang.org/x/text/transform" -) - -func transformString(t transform.Transformer, s string) (string, error) { - r := transform.NewReader(strings.NewReader(s), t) - b, err := ioutil.ReadAll(r) - return string(b), err -} - -type testCase struct { - utf8, other, otherEncoding string -} - -// testCases for encoding and decoding. -var testCases = []testCase{ - {"Résumé", "Résumé", "utf8"}, - {"Résumé", "R\xe9sum\xe9", "latin1"}, - {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"}, - {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"}, - {"Hello, world", "Hello, world", "ASCII"}, - {"GdaÅ„sk", "Gda\xf1sk", "ISO-8859-2"}, - {"Ââ ÄŒÄ ÄÄ‘ ÅŠÅ‹ Õõ Å Å¡ Žž Ã…Ã¥ Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"}, - {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"}, - {"latvieÅ¡u", "latvie\xf0u", "ISO-8859-13"}, - {"Seònaid", "Se\xf2naid", "ISO-8859-14"}, - {"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"}, - {"româneÈ™te", "rom\xe2ne\xbate", "ISO-8859-16"}, - {"nutraĵo", "nutra\xbco", "ISO-8859-3"}, - {"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"}, - {"руÑÑкий", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"}, - {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"}, - {"KaÄŸan", "Ka\xf0an", "ISO-8859-9"}, - {"Résumé", "R\x8esum\x8e", "macintosh"}, - {"GdaÅ„sk", "Gda\xf1sk", "windows-1250"}, - {"руÑÑкий", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"}, - {"Résumé", "R\xe9sum\xe9", "windows-1252"}, - {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"}, - {"KaÄŸan", "Ka\xf0an", "windows-1254"}, - {"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"}, - {"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"}, - {"latvieÅ¡u", "latvie\xf0u", "windows-1257"}, - {"Việt", "Vi\xea\xf2t", "windows-1258"}, - {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"}, - {"руÑÑкий", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"}, - {"українÑька", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"}, - {"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"}, - {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"}, - {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"}, - {"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"}, - {"㧯", "\x82\x31\x89\x38", "gb18030"}, - {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"}, - {"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"}, - {"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"}, - {"ã“ã‚Œã¯æ¼¢å­—ã§ã™ã€‚", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"}, - {"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", "ISO-2022-JP"}, - {"다ìŒê³¼ ê°™ì€ ì¡°ê±´ì„ ë”°ë¼ì•¼ 합니다: 저작ìží‘œì‹œ", "\xb4\xd9\xc0\xbd\xb0\xfa \xb0\xb0\xc0\xba \xc1\xb6\xb0\xc7\xc0\xbb \xb5\xfb\xb6\xf3\xbe\xdf \xc7Õ´Ï´\xd9: \xc0\xfa\xc0\xdb\xc0\xdaÇ¥\xbd\xc3", "EUC-KR"}, -} - -func TestDecode(t *testing.T) { - testCases := append(testCases, []testCase{ - // Replace multi-byte maximum subpart of ill-formed subsequence with - // single replacement character (WhatWG requirement). - {"Rés\ufffdumé", "Rés\xe1\x80umé", "utf8"}, - }...) 
- for _, tc := range testCases { - e, _ := Lookup(tc.otherEncoding) - if e == nil { - t.Errorf("%s: not found", tc.otherEncoding) - continue - } - s, err := transformString(e.NewDecoder(), tc.other) - if err != nil { - t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err) - continue - } - if s != tc.utf8 { - t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8) - } - } -} - -func TestEncode(t *testing.T) { - testCases := append(testCases, []testCase{ - // Use Go-style replacement. - {"Rés\xe1\x80umé", "Rés\ufffd\ufffdumé", "utf8"}, - // U+0144 LATIN SMALL LETTER N WITH ACUTE not supported by encoding. - {"GdaÅ„sk", "Gdańsk", "ISO-8859-11"}, - {"\ufffd", "�", "ISO-8859-11"}, - {"a\xe1\x80b", "a��b", "ISO-8859-11"}, - }...) - for _, tc := range testCases { - e, _ := Lookup(tc.otherEncoding) - if e == nil { - t.Errorf("%s: not found", tc.otherEncoding) - continue - } - s, err := transformString(e.NewEncoder(), tc.utf8) - if err != nil { - t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err) - continue - } - if s != tc.other { - t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other) - } - } -} - -var sniffTestCases = []struct { - filename, declared, want string -}{ - {"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, - {"UTF-16LE-BOM.html", "", "utf-16le"}, - {"UTF-16BE-BOM.html", "", "utf-16be"}, - {"meta-content-attribute.html", "text/html", "iso-8859-15"}, - {"meta-charset-attribute.html", "text/html", "iso-8859-15"}, - {"No-encoding-declaration.html", "text/html", "utf-8"}, - {"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"}, - {"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, - {"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"}, - {"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"}, - {"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"}, -} - -func TestSniff(t *testing.T) { - switch runtime.GOOS { - case "nacl": // platforms that don't permit direct file system access - t.Skipf("not supported on %q", runtime.GOOS) - } - - for _, tc := range sniffTestCases { - content, err := ioutil.ReadFile("testdata/" + tc.filename) - if err != nil { - t.Errorf("%s: error reading file: %v", tc.filename, err) - continue - } - - _, name, _ := DetermineEncoding(content, tc.declared) - if name != tc.want { - t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want) - continue - } - } -} - -func TestReader(t *testing.T) { - switch runtime.GOOS { - case "nacl": // platforms that don't permit direct file system access - t.Skipf("not supported on %q", runtime.GOOS) - } - - for _, tc := range sniffTestCases { - content, err := ioutil.ReadFile("testdata/" + tc.filename) - if err != nil { - t.Errorf("%s: error reading file: %v", tc.filename, err) - continue - } - - r, err := NewReader(bytes.NewReader(content), tc.declared) - if err != nil { - t.Errorf("%s: error creating reader: %v", tc.filename, err) - continue - } - - got, err := ioutil.ReadAll(r) - if err != nil { - t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err) - continue - } - - e, _ := Lookup(tc.want) - want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder())) - if err != nil { - t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err) - continue - } - - if !bytes.Equal(got, want) { - t.Errorf("%s: got %q, want %q", tc.filename, got, want) - continue - } - } -} - -var metaTestCases = []struct { - meta, want string -}{ - {"", ""}, - 
{"text/html", ""}, - {"text/html; charset utf-8", ""}, - {"text/html; charset=latin-2", "latin-2"}, - {"text/html; charset; charset = utf-8", "utf-8"}, - {`charset="big5"`, "big5"}, - {"charset='shift_jis'", "shift_jis"}, -} - -func TestFromMeta(t *testing.T) { - for _, tc := range metaTestCases { - got := fromMetaElement(tc.meta) - if got != tc.want { - t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want) - } - } -} - -func TestXML(t *testing.T) { - const s = "r\xe9sum\xe9" - - d := xml.NewDecoder(strings.NewReader(s)) - d.CharsetReader = NewReaderLabel - - var a struct { - Word string - } - err := d.Decode(&a) - if err != nil { - t.Fatalf("Decode: %v", err) - } - - want := "résumé" - if a.Word != want { - t.Errorf("got %q, want %q", a.Word, want) - } -} diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html deleted file mode 100644 index 9915fa0e..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - HTTP charset - - - - - - - - - - - -

[Deleted W3C test page "HTTP charset" (the-input-byte-stream-001, HTML5). It checks that the character encoding of a page can be set using the HTTP header charset declaration: the only encoding declaration for this file is the HTTP header, which sets ISO 8859-15. The page contains a div whose class name is the byte sequence 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8; the external UTF-8-encoded stylesheet selector .test div.ÜÀÚ matches that sequence only when it is interpreted as ISO 8859-15, so a matching class name means the test passes. Assumptions: the browser's default encoding is not ISO 8859-15, and the test is read from a server that supports HTTP.]
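[For reference, not part of this patch: the precedence these W3C pages exercise is what the vendored charset.DetermineEncoding shown earlier in this diff implements (BOM first, then the Content-Type charset, then a meta prescan, then UTF-8 detection). A minimal sketch of that call for the case above; the HTML snippet and the expected output are illustrative only, the function and its signature are taken from the deleted charset.go:]

	package main

	import (
		"fmt"

		"golang.org/x/net/html/charset"
	)

	func main() {
		// A page with no BOM whose class name carries the test's byte
		// sequence 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8.
		content := []byte("<!doctype html><div class=\"\xc3\xbd\xc3\xa4\xc3\xa8\">x</div>")

		// The encoding declared via the HTTP Content-Type header is used,
		// mirroring the-input-byte-stream-001.
		_, name, certain := charset.DetermineEncoding(content, "text/html; charset=iso-8859-15")
		fmt.Println(name, certain) // expected: iso-8859-15 true
	}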
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html deleted file mode 100644 index 26e5d8b4..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - HTTP vs UTF-8 BOM - - - - - - - - - - - -

[Deleted W3C test page "HTTP vs UTF-8 BOM" (the-input-byte-stream-034, HTML5). A character encoding set in the HTTP header has lower precedence than the UTF-8 signature: the HTTP header attempts to set ISO 8859-15, but the page starts with a UTF-8 BOM. The stylesheet selector .test div.ýäè matches the 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8 class-name bytes only when they are interpreted as UTF-8; if the page is decoded as ISO 8859-15 instead, the three bytes of the UTF-8 signature appear as stray characters at the top of the page. Same assumptions as above.]
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html deleted file mode 100644 index 2f07e951..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html +++ /dev/null @@ -1,49 +0,0 @@ - - - - HTTP vs meta charset - - - - - - - - - - - -

[Deleted W3C test page "HTTP vs meta charset" (the-input-byte-stream-018, HTML5). The HTTP header has higher precedence than an encoding declaration in a meta charset attribute: HTTP sets ISO 8859-15 while the meta charset attribute attempts ISO 8859-1, and the .test div.ÜÀÚ selector matches the class-name bytes only under the ISO 8859-15 interpretation. Same assumptions as above.]
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html deleted file mode 100644 index 6853cdde..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html +++ /dev/null @@ -1,49 +0,0 @@ - - - - HTTP vs meta content - - - - - - - - - - - -

[Deleted W3C test page "HTTP vs meta content" (the-input-byte-stream-016, HTML5). Identical to the previous test except that the competing declaration is a meta http-equiv/content attribute rather than a meta charset attribute; the HTTP header (ISO 8859-15) still takes precedence. Same assumptions as above.]
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html deleted file mode 100644 index 612e26c6..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html +++ /dev/null @@ -1,47 +0,0 @@ - - - - No encoding declaration - - - - - - - - - - - -

[Deleted W3C test page "No encoding declaration" (the-input-byte-stream-015, HTML5). A page with no encoding information in HTTP, BOM, XML declaration or meta element is treated as UTF-8: the .test div.ýäè selector matches the class-name bytes only under the UTF-8 interpretation. Assumption: the test is read from a server that supports HTTP.]
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/README b/vendor/golang.org/x/net/html/charset/testdata/README deleted file mode 100644 index 38ef0f9f..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/README +++ /dev/null @@ -1,9 +0,0 @@ -These test cases come from -http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics - -Distributed under both the W3C Test Suite License -(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license) -and the W3C 3-clause BSD License -(http://www.w3.org/Consortium/Legal/2008/03-bsd-license). -To contribute to a W3C Test Suite, see the policies and contribution -forms (http://www.w3.org/2004/10/27-testcases). diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html deleted file mode 100644 index 3abf7a9343c20518e57dfea58b374fb0f4fb58a1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2670 zcmcJR?QRoS5Qc}JAoU&=BQ-(7b^;2j8i*i3RV1JlO@;VXIsPurV!WHiDdLW}i`*CO z^UnC>tih=KsVr;H&Y7?C&O3AV(?534uG?e##U9y_y|!QNi4``n+D>d{2lky^LnFNx z?9HrarH$>rwQR_$g)Hk0*&STI*EYq|47~&U9sfUB+ji})9eR{QqCUra7oDsZ5obtB zdxP%<)-$4Q;rSHJiM>U(#ZI=;?n^BC?Dp6lu=~_1-lnX3u03&2BlmQIY>L+!Uq7XoytKw^Q#oZSM?3*J?)&ojG&yzQRkC!Ml5JE?ax;lp_NYEcdUht`ZswOviB~L5hmJ|pXI71nn20w;>vG! zQGB$EE9&wC``&J#_Ym~PgRu-Bd>1!pOp0||k`kr=VJ zfH6I6rmRaeHA7U-A^OTsT+|d2a^i(>DePzZ{)ibXoCBvJnuYrd-3kkN$uy{qQK;=*Y;S87ro12aTgu^i*%f8zC3>a}9DIe4cfxOzsCw&(cqvP9{ud{N6f` z#TNDY(B6@Gpr|uN+%&x^XZjBHdc@2vsM(Tyc2=vshHQ5w+obmp>tuWT(t4BTUGAQw zxeI$UGSLUBg=WFbF;4f@4=^P2AgY@CFn8A`bcC=_&~)fiDe)#cUARRBzJ^k|%X)69 z+{Cb`wq}Rsg%B62CC_tK!AV(W{(MV?#mndR46CU#BUN<{8e?*oT+!pE5wF#O#TR#a z$9qRT)tpbw8zAI~QQJg2C3|6$I%(T(;`zOMy6SO+&;pG=c#2P|P-WZn$$DpWJlC3U z3*nvmz zwP{u~r$L?-m3uqp9I1+#3yE|3M$(s-BEtih=LQ>`qYoiktOop(wi%!;yh%+Rm z{e|xntY<{q!1F1Z6MKtngPm-p-4|H&+3m4AVE3_AyiHm6Tzlf4M(*ht*%YrezJ6kr zHGj45pc?64*$Cm%-zseWMA`x;)v*~jA=i}szqts9xmQkS`M11|(H7bTXAycsXU53+ zJ?120SRZeyiFjW7enPN`bxk$IaWV3o48oJF7D&2ysoY;6(s6%6vVfaYd&mC=erK!) zNGI^7upQgN)53OHe_VE<@J+G8*Y|p*)zB2Thdi}+YR<5QWHm!|a_*AoZXuv7)$xe| zm3Q$D7{|#}{m4X&UY!6(ZhyYi2(5JLzGE$H)W6BQklnjPMwn<Yvv7Z*TVWwD*=E3QpH37* z#lqXJA0A~J9T_<^W5smspmDg2p6ac5Bjn+~LAoow%1TCdZ*$K8`O zw_$HaCi+0N&@7la#_7KL5r$+QL{)Pi=I&aDjt~|Knht#`CEi4*3%97i_fSfASlwUz0=3V0GCxY}z81UC-nP=CGt2OqYV$ zoRCo+qM9YX*3FFORLC=E3B~S@+KROyk4r5 yX7?DaslDfIebqXgC!KKp4IYy+W~X?ddE6o=`A+x#x0AK&6MF#W&AXxbRrv+SX}PNa diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html deleted file mode 100644 index 83de4333..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html +++ /dev/null @@ -1,49 +0,0 @@ - - - - UTF-8 BOM vs meta charset - - - - - - - - - - - -

[Deleted W3C test page "UTF-8 BOM vs meta charset" (the-input-byte-stream-038, HTML5). A page with a UTF-8 BOM is recognized as UTF-8 even if a meta charset attribute declares a different encoding (here ISO 8859-15); the .test div.ýäè selector matches only the UTF-8 interpretation. Same assumptions as the HTTP tests above.]
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html deleted file mode 100644 index 501aac2d..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - UTF-8 BOM vs meta content - - - - - - - - - - - -

[Deleted W3C test page "UTF-8 BOM vs meta content" (the-input-byte-stream-037, HTML5). Same as the previous test, but the competing declaration is a meta content attribute instead of a meta charset attribute; the UTF-8 BOM still takes precedence. Same assumptions as above.]
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html deleted file mode 100644 index 2d7d25ab..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - meta charset attribute - - - - - - - - - - - -

[Deleted W3C test page "meta charset attribute" (the-input-byte-stream-009, HTML5). The character encoding of a page can be set by a meta element with a charset attribute: the only declaration in this file is the meta charset attribute, which sets ISO 8859-15, and the .test div.ÜÀÚ selector matches only the ISO 8859-15 interpretation. Same assumptions as above.]
- - - - - - diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html deleted file mode 100644 index 1c3f228e..00000000 --- a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - meta content attribute - - - - - - - - - - - -

[Deleted W3C test page "meta content attribute" (the-input-byte-stream-007, HTML5). The character encoding of a page can be set by a meta element with http-equiv and content attributes: the only declaration in this file is the meta content attribute, which sets ISO 8859-15, and the .test div.ÜÀÚ selector matches only the ISO 8859-15 interpretation. Same assumptions as above.]
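[For reference, not part of this patch: a caller that just wants UTF-8 output can use the vendored charset.NewReader, whose doc comment appears earlier in this diff; it sniffs a 1024-byte preview plus the declared Content-Type and wraps the reader in a decoder. A minimal usage sketch; the URL is hypothetical, the function names and signatures come from the deleted charset.go:]

	package main

	import (
		"fmt"
		"io/ioutil"
		"net/http"

		"golang.org/x/net/html/charset"
	)

	func main() {
		resp, err := http.Get("https://example.com/") // hypothetical URL
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		// NewReader examines up to the first 1024 bytes (BOM, Content-Type,
		// then meta prescan) and, unless the page is already UTF-8, wraps
		// the body in a transform reader that decodes it to UTF-8.
		r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
		if err != nil {
			panic(err)
		}
		body, err := ioutil.ReadAll(r)
		if err != nil {
			panic(err)
		}
		fmt.Printf("read %d UTF-8 bytes\n", len(body))
	}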
- - - - - - diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go deleted file mode 100644 index 52f651ff..00000000 --- a/vendor/golang.org/x/net/html/const.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// Section 12.2.3.2 of the HTML5 specification says "The following elements -// have varying levels of special parsing rules". -// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements -var isSpecialElementMap = map[string]bool{ - "address": true, - "applet": true, - "area": true, - "article": true, - "aside": true, - "base": true, - "basefont": true, - "bgsound": true, - "blockquote": true, - "body": true, - "br": true, - "button": true, - "caption": true, - "center": true, - "col": true, - "colgroup": true, - "dd": true, - "details": true, - "dir": true, - "div": true, - "dl": true, - "dt": true, - "embed": true, - "fieldset": true, - "figcaption": true, - "figure": true, - "footer": true, - "form": true, - "frame": true, - "frameset": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "header": true, - "hgroup": true, - "hr": true, - "html": true, - "iframe": true, - "img": true, - "input": true, - "isindex": true, - "li": true, - "link": true, - "listing": true, - "marquee": true, - "menu": true, - "meta": true, - "nav": true, - "noembed": true, - "noframes": true, - "noscript": true, - "object": true, - "ol": true, - "p": true, - "param": true, - "plaintext": true, - "pre": true, - "script": true, - "section": true, - "select": true, - "source": true, - "style": true, - "summary": true, - "table": true, - "tbody": true, - "td": true, - "template": true, - "textarea": true, - "tfoot": true, - "th": true, - "thead": true, - "title": true, - "tr": true, - "track": true, - "ul": true, - "wbr": true, - "xmp": true, -} - -func isSpecialElement(element *Node) bool { - switch element.Namespace { - case "", "html": - return isSpecialElementMap[element.Data] - case "svg": - return element.Data == "foreignObject" - } - return false -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go deleted file mode 100644 index 94f49687..00000000 --- a/vendor/golang.org/x/net/html/doc.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package html implements an HTML5-compliant tokenizer and parser. - -Tokenization is done by creating a Tokenizer for an io.Reader r. It is the -caller's responsibility to ensure that r provides UTF-8 encoded HTML. - - z := html.NewTokenizer(r) - -Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), -which parses the next token and returns its type, or an error: - - for { - tt := z.Next() - if tt == html.ErrorToken { - // ... - return ... - } - // Process the current token. - } - -There are two APIs for retrieving the current token. The high-level API is to -call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs -allow optionally calling Raw after Next but before Token, Text, TagName, or -TagAttr. 
In EBNF notation, the valid call sequence per token is: - - Next {Raw} [ Token | Text | TagName {TagAttr} ] - -Token returns an independent data structure that completely describes a token. -Entities (such as "<") are unescaped, tag names and attribute keys are -lower-cased, and attributes are collected into a []Attribute. For example: - - for { - if z.Next() == html.ErrorToken { - // Returning io.EOF indicates success. - return z.Err() - } - emitToken(z.Token()) - } - -The low-level API performs fewer allocations and copies, but the contents of -the []byte values returned by Text, TagName and TagAttr may change on the next -call to Next. For example, to extract an HTML page's anchor text: - - depth := 0 - for { - tt := z.Next() - switch tt { - case ErrorToken: - return z.Err() - case TextToken: - if depth > 0 { - // emitBytes should copy the []byte it receives, - // if it doesn't process it immediately. - emitBytes(z.Text()) - } - case StartTagToken, EndTagToken: - tn, _ := z.TagName() - if len(tn) == 1 && tn[0] == 'a' { - if tt == StartTagToken { - depth++ - } else { - depth-- - } - } - } - } - -Parsing is done by calling Parse with an io.Reader, which returns the root of -the parse tree (the document element) as a *Node. It is the caller's -responsibility to ensure that the Reader provides UTF-8 encoded HTML. For -example, to process each anchor node in depth-first order: - - doc, err := html.Parse(r) - if err != nil { - // ... - } - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - // Do something with n... - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(doc) - -The relevant specifications include: -https://html.spec.whatwg.org/multipage/syntax.html and -https://html.spec.whatwg.org/multipage/syntax.html#tokenization -*/ -package html // import "golang.org/x/net/html" - -// The tokenization algorithm implemented by this package is not a line-by-line -// transliteration of the relatively verbose state-machine in the WHATWG -// specification. A more direct approach is used instead, where the program -// counter implies the state, such as whether it is tokenizing a tag or a text -// node. Specification compliance is verified by checking expected and actual -// outputs over a test suite rather than aiming for algorithmic fidelity. - -// TODO(nigeltao): Does a DOM API belong in this package or a separate one? -// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go deleted file mode 100644 index c484e5a9..00000000 --- a/vendor/golang.org/x/net/html/doctype.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "strings" -) - -// parseDoctype parses the data from a DoctypeToken into a name, -// public identifier, and system identifier. It returns a Node whose Type -// is DoctypeNode, whose Data is the name, and which has attributes -// named "system" and "public" for the two identifiers if they were present. -// quirks is whether the document should be parsed in "quirks mode". -func parseDoctype(s string) (n *Node, quirks bool) { - n = &Node{Type: DoctypeNode} - - // Find the name. - space := strings.IndexAny(s, whitespace) - if space == -1 { - space = len(s) - } - n.Data = s[:space] - // The comparison to "html" is case-sensitive. 
- if n.Data != "html" { - quirks = true - } - n.Data = strings.ToLower(n.Data) - s = strings.TrimLeft(s[space:], whitespace) - - if len(s) < 6 { - // It can't start with "PUBLIC" or "SYSTEM". - // Ignore the rest of the string. - return n, quirks || s != "" - } - - key := strings.ToLower(s[:6]) - s = s[6:] - for key == "public" || key == "system" { - s = strings.TrimLeft(s, whitespace) - if s == "" { - break - } - quote := s[0] - if quote != '"' && quote != '\'' { - break - } - s = s[1:] - q := strings.IndexRune(s, rune(quote)) - var id string - if q == -1 { - id = s - s = "" - } else { - id = s[:q] - s = s[q+1:] - } - n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) - if key == "public" { - key = "system" - } else { - key = "" - } - } - - if key != "" || s != "" { - quirks = true - } else if len(n.Attr) > 0 { - if n.Attr[0].Key == "public" { - public := strings.ToLower(n.Attr[0].Val) - switch public { - case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": - quirks = true - default: - for _, q := range quirkyIDs { - if strings.HasPrefix(public, q) { - quirks = true - break - } - } - } - // The following two public IDs only cause quirks mode if there is no system ID. - if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || - strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { - quirks = true - } - } - if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { - quirks = true - } - } - - return n, quirks -} - -// quirkyIDs is a list of public doctype identifiers that cause a document -// to be interpreted in quirks mode. The identifiers should be in lower case. -var quirkyIDs = []string{ - "+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. 
corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//", -} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go deleted file mode 100644 index a50c04c6..00000000 --- a/vendor/golang.org/x/net/html/entity.go +++ /dev/null @@ -1,2253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// All entities that do not end with ';' are 6 or fewer bytes long. -const longestEntityWithoutSemicolon = 6 - -// entity is a map from HTML entity names to their values. The semicolon matters: -// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references -// lists both "amp" and "amp;" as two separate entries. -// -// Note that the HTML5 list is larger than the HTML4 list at -// http://www.w3.org/TR/html4/sgml/entities.html -var entity = map[string]rune{ - "AElig;": '\U000000C6', - "AMP;": '\U00000026', - "Aacute;": '\U000000C1', - "Abreve;": '\U00000102', - "Acirc;": '\U000000C2', - "Acy;": '\U00000410', - "Afr;": '\U0001D504', - "Agrave;": '\U000000C0', - "Alpha;": '\U00000391', - "Amacr;": '\U00000100', - "And;": '\U00002A53', - "Aogon;": '\U00000104', - "Aopf;": '\U0001D538', - "ApplyFunction;": '\U00002061', - "Aring;": '\U000000C5', - "Ascr;": '\U0001D49C', - "Assign;": '\U00002254', - "Atilde;": '\U000000C3', - "Auml;": '\U000000C4', - "Backslash;": '\U00002216', - "Barv;": '\U00002AE7', - "Barwed;": '\U00002306', - "Bcy;": '\U00000411', - "Because;": '\U00002235', - "Bernoullis;": '\U0000212C', - "Beta;": '\U00000392', - "Bfr;": '\U0001D505', - "Bopf;": '\U0001D539', - "Breve;": '\U000002D8', - "Bscr;": '\U0000212C', - "Bumpeq;": '\U0000224E', - "CHcy;": '\U00000427', - "COPY;": '\U000000A9', - "Cacute;": '\U00000106', - "Cap;": '\U000022D2', - "CapitalDifferentialD;": '\U00002145', - "Cayleys;": '\U0000212D', - "Ccaron;": '\U0000010C', - "Ccedil;": '\U000000C7', - "Ccirc;": '\U00000108', - "Cconint;": '\U00002230', - "Cdot;": '\U0000010A', - "Cedilla;": '\U000000B8', - "CenterDot;": '\U000000B7', - "Cfr;": '\U0000212D', - "Chi;": '\U000003A7', - "CircleDot;": '\U00002299', - "CircleMinus;": '\U00002296', - "CirclePlus;": '\U00002295', - "CircleTimes;": '\U00002297', - "ClockwiseContourIntegral;": '\U00002232', - "CloseCurlyDoubleQuote;": '\U0000201D', - "CloseCurlyQuote;": '\U00002019', - "Colon;": '\U00002237', - "Colone;": '\U00002A74', - "Congruent;": '\U00002261', - "Conint;": '\U0000222F', - "ContourIntegral;": '\U0000222E', - "Copf;": '\U00002102', 
- "Coproduct;": '\U00002210', - "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - "DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', - "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": '\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - "DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": '\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": 
'\U00000124', - "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": '\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": '\U00002190', - "LeftArrowBar;": '\U000021E4', - "LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', - "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - "Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - 
"NegativeMediumSpace;": '\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": '\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', - "Oacute;": '\U000000D3', - "Ocirc;": '\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - "Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": '\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', 
- "RightDownVector;": '\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": '\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": '\U0000227B', - "SucceedsEqual;": '\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": '\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": '\U000022AB', - "Vbar;": 
'\U00002AEB', - "Vcy;": '\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": '\U00002227', - "andand;": '\U00002A55', - "andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - "angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": '\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - 
"bigstar;": '\U00002605', - "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": '\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": '\U00002518', - "boxur;": '\U00002514', - "boxv;": '\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": '\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', - "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": 
'\U00002201', - "compfn;": '\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": '\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - "deg;": '\U000000B0', - "delta;": '\U000003B4', - "demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - "diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', - "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - 
"empty;": '\U00002205', - "emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": '\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": '\U00002159', - "frac18;": '\U0000215B', - "frac23;": '\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": '\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": '\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": 
'\U00002194', - "harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": '\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": '\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - "isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - "isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": 
'\U0000294B', - "ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - "leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": '\U000027F5', - "longleftrightarrow;": '\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": '\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - "lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": 
'\U00002A2A', - "mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - "natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": '\U000021AE', - "nleq;": '\U00002270', - "nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - "nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": '\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - 
"oast;": '\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', - "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": '\U00002031', - "pfr;": '\U0001D52D', - "phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - "piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - "puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": 
'\U000027E9', - "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - "rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": '\U0000219D', - "rightthreetimes;": '\U000022CC', - "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": '\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', - "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - 
"sigmav;": '\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - "subplus;": '\U00002ABF', - "subrarr;": '\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": '\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - "succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - 
"times;": '\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - "triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": '\U00000173', - "uopf;": '\U0001D566', - "uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - "upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": '\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', 
- "xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": '\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": '\U000000D1', - "Oacute": '\U000000D3', - "Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - "REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": '\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": 
'\U000000FB', - "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', -} - -// HTML entities that are two unicode codepoints. -var entity2 = map[string][2]rune{ - // TODO(nigeltao): Handle replacements that are wider than their names. - // "nLt;": {'\u226A', '\u20D2'}, - // "nGt;": {'\u226B', '\u20D2'}, - "NotEqualTilde;": {'\u2242', '\u0338'}, - "NotGreaterFullEqual;": {'\u2267', '\u0338'}, - "NotGreaterGreater;": {'\u226B', '\u0338'}, - "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, - "NotHumpDownHump;": {'\u224E', '\u0338'}, - "NotHumpEqual;": {'\u224F', '\u0338'}, - "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, - "NotLessLess;": {'\u226A', '\u0338'}, - "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, - "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, - "NotNestedLessLess;": {'\u2AA1', '\u0338'}, - "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, - "NotRightTriangleBar;": {'\u29D0', '\u0338'}, - "NotSquareSubset;": {'\u228F', '\u0338'}, - "NotSquareSuperset;": {'\u2290', '\u0338'}, - "NotSubset;": {'\u2282', '\u20D2'}, - "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, - "NotSucceedsTilde;": {'\u227F', '\u0338'}, - "NotSuperset;": {'\u2283', '\u20D2'}, - "ThickSpace;": {'\u205F', '\u200A'}, - "acE;": {'\u223E', '\u0333'}, - "bne;": {'\u003D', '\u20E5'}, - "bnequiv;": {'\u2261', '\u20E5'}, - "caps;": {'\u2229', '\uFE00'}, - "cups;": {'\u222A', '\uFE00'}, - "fjlig;": {'\u0066', '\u006A'}, - "gesl;": {'\u22DB', '\uFE00'}, - "gvertneqq;": {'\u2269', '\uFE00'}, - "gvnE;": {'\u2269', '\uFE00'}, - "lates;": {'\u2AAD', '\uFE00'}, - "lesg;": {'\u22DA', '\uFE00'}, - "lvertneqq;": {'\u2268', '\uFE00'}, - "lvnE;": {'\u2268', '\uFE00'}, - "nGg;": {'\u22D9', '\u0338'}, - "nGtv;": {'\u226B', '\u0338'}, - "nLl;": {'\u22D8', '\u0338'}, - "nLtv;": {'\u226A', '\u0338'}, - "nang;": {'\u2220', '\u20D2'}, - "napE;": {'\u2A70', '\u0338'}, - "napid;": {'\u224B', '\u0338'}, - "nbump;": {'\u224E', '\u0338'}, - "nbumpe;": {'\u224F', '\u0338'}, - "ncongdot;": {'\u2A6D', '\u0338'}, - "nedot;": {'\u2250', '\u0338'}, - "nesim;": {'\u2242', '\u0338'}, - "ngE;": {'\u2267', '\u0338'}, - "ngeqq;": {'\u2267', '\u0338'}, - "ngeqslant;": {'\u2A7E', '\u0338'}, - "nges;": {'\u2A7E', '\u0338'}, - "nlE;": {'\u2266', '\u0338'}, - "nleqq;": {'\u2266', '\u0338'}, - "nleqslant;": {'\u2A7D', '\u0338'}, - "nles;": {'\u2A7D', '\u0338'}, - "notinE;": {'\u22F9', '\u0338'}, - "notindot;": {'\u22F5', '\u0338'}, - "nparsl;": {'\u2AFD', '\u20E5'}, - "npart;": {'\u2202', '\u0338'}, - "npre;": {'\u2AAF', '\u0338'}, - "npreceq;": {'\u2AAF', '\u0338'}, - "nrarrc;": {'\u2933', '\u0338'}, - "nrarrw;": {'\u219D', '\u0338'}, - "nsce;": {'\u2AB0', '\u0338'}, - "nsubE;": {'\u2AC5', '\u0338'}, - "nsubset;": {'\u2282', '\u20D2'}, - "nsubseteqq;": {'\u2AC5', '\u0338'}, - "nsucceq;": {'\u2AB0', '\u0338'}, - "nsupE;": {'\u2AC6', '\u0338'}, - "nsupset;": {'\u2283', '\u20D2'}, - "nsupseteqq;": {'\u2AC6', '\u0338'}, - "nvap;": {'\u224D', '\u20D2'}, - "nvge;": {'\u2265', '\u20D2'}, - "nvgt;": {'\u003E', '\u20D2'}, - "nvle;": {'\u2264', '\u20D2'}, - "nvlt;": {'\u003C', '\u20D2'}, - "nvltrie;": {'\u22B4', '\u20D2'}, - "nvrtrie;": {'\u22B5', '\u20D2'}, - "nvsim;": {'\u223C', '\u20D2'}, - "race;": {'\u223D', '\u0331'}, - "smtes;": {'\u2AAC', '\uFE00'}, - "sqcaps;": {'\u2293', '\uFE00'}, - "sqcups;": {'\u2294', '\uFE00'}, - "varsubsetneq;": {'\u228A', '\uFE00'}, - "varsubsetneqq;": {'\u2ACB', '\uFE00'}, - "varsupsetneq;": {'\u228B', '\uFE00'}, - "varsupsetneqq;": {'\u2ACC', '\uFE00'}, - 
"vnsub;": {'\u2282', '\u20D2'}, - "vnsup;": {'\u2283', '\u20D2'}, - "vsubnE;": {'\u2ACB', '\uFE00'}, - "vsubne;": {'\u228A', '\uFE00'}, - "vsupnE;": {'\u2ACC', '\uFE00'}, - "vsupne;": {'\u228B', '\uFE00'}, -} diff --git a/vendor/golang.org/x/net/html/entity_test.go b/vendor/golang.org/x/net/html/entity_test.go deleted file mode 100644 index b53f866f..00000000 --- a/vendor/golang.org/x/net/html/entity_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "testing" - "unicode/utf8" -) - -func TestEntityLength(t *testing.T) { - // We verify that the length of UTF-8 encoding of each value is <= 1 + len(key). - // The +1 comes from the leading "&". This property implies that the length of - // unescaped text is <= the length of escaped text. - for k, v := range entity { - if 1+len(k) < utf8.RuneLen(v) { - t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v)) - } - if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' { - t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon) - } - } - for k, v := range entity2 { - if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) { - t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1])) - } - } -} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go deleted file mode 100644 index d8561396..00000000 --- a/vendor/golang.org/x/net/html/escape.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// These replacements permit compatibility with old numeric entities that -// assumed Windows-1252 encoding. -// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference -var replacementTable = [...]rune{ - '\u20AC', // First entry is what 0x80 should be replaced with. - '\u0081', - '\u201A', - '\u0192', - '\u201E', - '\u2026', - '\u2020', - '\u2021', - '\u02C6', - '\u2030', - '\u0160', - '\u2039', - '\u0152', - '\u008D', - '\u017D', - '\u008F', - '\u0090', - '\u2018', - '\u2019', - '\u201C', - '\u201D', - '\u2022', - '\u2013', - '\u2014', - '\u02DC', - '\u2122', - '\u0161', - '\u203A', - '\u0153', - '\u009D', - '\u017E', - '\u0178', // Last entry is 0x9F. - // 0x00->'\uFFFD' is handled programmatically. - // 0x0D->'\u000D' is a no-op. -} - -// unescapeEntity reads an entity like "<" from b[src:] and writes the -// corresponding "<" to b[dst:], returning the incremented dst and src cursors. -// Precondition: b[src] == '&' && dst <= src. -// attribute should be true if parsing an attribute value. -func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { - // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference - - // i starts at 1 because we already know that s[0] == '&'. - i, s := 1, b[src:] - - if len(s) <= 1 { - b[dst] = b[src] - return dst + 1, src + 1 - } - - if s[i] == '#' { - if len(s) <= 3 { // We need to have at least "&#.". 
- b[dst] = b[src] - return dst + 1, src + 1 - } - i++ - c := s[i] - hex := false - if c == 'x' || c == 'X' { - hex = true - i++ - } - - x := '\x00' - for i < len(s) { - c = s[i] - i++ - if hex { - if '0' <= c && c <= '9' { - x = 16*x + rune(c) - '0' - continue - } else if 'a' <= c && c <= 'f' { - x = 16*x + rune(c) - 'a' + 10 - continue - } else if 'A' <= c && c <= 'F' { - x = 16*x + rune(c) - 'A' + 10 - continue - } - } else if '0' <= c && c <= '9' { - x = 10*x + rune(c) - '0' - continue - } - if c != ';' { - i-- - } - break - } - - if i <= 3 { // No characters matched. - b[dst] = b[src] - return dst + 1, src + 1 - } - - if 0x80 <= x && x <= 0x9F { - // Replace characters from Windows-1252 with UTF-8 equivalents. - x = replacementTable[x-0x80] - } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { - // Replace invalid characters with the replacement character. - x = '\uFFFD' - } - - return dst + utf8.EncodeRune(b[dst:], x), src + i - } - - // Consume the maximum number of characters possible, with the - // consumed characters matching one of the named references. - - for i < len(s) { - c := s[i] - i++ - // Lower-cased characters are more common in entities, so we check for them first. - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - continue - } - if c != ';' { - i-- - } - break - } - - entityName := string(s[1:i]) - if entityName == "" { - // No-op. - } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { - // No-op. - } else if x := entity[entityName]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + i - } else if x := entity2[entityName]; x[0] != 0 { - dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) - return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i - } else if !attribute { - maxLen := len(entityName) - 1 - if maxLen > longestEntityWithoutSemicolon { - maxLen = longestEntityWithoutSemicolon - } - for j := maxLen; j > 1; j-- { - if x := entity[entityName[:j]]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 - } - } - } - - dst1, src1 = dst+i, src+i - copy(b[dst:dst1], b[src:src1]) - return dst1, src1 -} - -// unescape unescapes b's entities in-place, so that "a<b" becomes "a': - esc = ">" - case '"': - // """ is shorter than """. - esc = """ - case '\r': - esc = " " - default: - panic("unrecognized escape character") - } - s = s[i+1:] - if _, err := w.WriteString(esc); err != nil { - return err - } - i = strings.IndexAny(s, escapedChars) - } - _, err := w.WriteString(s) - return err -} - -// EscapeString escapes special characters like "<" to become "<". It -// escapes only five such characters: <, >, &, ' and ". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. -func EscapeString(s string) string { - if strings.IndexAny(s, escapedChars) == -1 { - return s - } - var buf bytes.Buffer - escape(&buf, s) - return buf.String() -} - -// UnescapeString unescapes entities like "<" to become "<". It unescapes a -// larger range of entities than EscapeString escapes. For example, "á" -// unescapes to "á", as does "á" and "&xE1;". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. 
-func UnescapeString(s string) string { - for _, c := range s { - if c == '&' { - return string(unescape([]byte(s), false)) - } - } - return s -} diff --git a/vendor/golang.org/x/net/html/escape_test.go b/vendor/golang.org/x/net/html/escape_test.go deleted file mode 100644 index b405d4b4..00000000 --- a/vendor/golang.org/x/net/html/escape_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import "testing" - -type unescapeTest struct { - // A short description of the test case. - desc string - // The HTML text. - html string - // The unescaped text. - unescaped string -} - -var unescapeTests = []unescapeTest{ - // Handle no entities. - { - "copy", - "A\ttext\nstring", - "A\ttext\nstring", - }, - // Handle simple named entities. - { - "simple", - "& > <", - "& > <", - }, - // Handle hitting the end of the string. - { - "stringEnd", - "& &", - "& &", - }, - // Handle entities with two codepoints. - { - "multiCodepoint", - "text ⋛︀ blah", - "text \u22db\ufe00 blah", - }, - // Handle decimal numeric entities. - { - "decimalEntity", - "Delta = Δ ", - "Delta = Δ ", - }, - // Handle hexadecimal numeric entities. - { - "hexadecimalEntity", - "Lambda = λ = λ ", - "Lambda = λ = λ ", - }, - // Handle numeric early termination. - { - "numericEnds", - "&# &#x €43 © = ©f = ©", - "&# &#x €43 © = ©f = ©", - }, - // Handle numeric ISO-8859-1 entity replacements. - { - "numericReplacements", - "Footnote‡", - "Footnote‡", - }, -} - -func TestUnescape(t *testing.T) { - for _, tt := range unescapeTests { - unescaped := UnescapeString(tt.html) - if unescaped != tt.unescaped { - t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped) - } - } -} - -func TestUnescapeEscape(t *testing.T) { - ss := []string{ - ``, - `abc def`, - `a & b`, - `a&b`, - `a & b`, - `"`, - `"`, - `"<&>"`, - `"<&>"`, - `3&5==1 && 0<1, "0<1", a+acute=á`, - `The special characters are: <, >, &, ' and "`, - } - for _, s := range ss { - if got := UnescapeString(EscapeString(s)); got != s { - t.Errorf("got %q want %q", got, s) - } - } -} diff --git a/vendor/golang.org/x/net/html/example_test.go b/vendor/golang.org/x/net/html/example_test.go deleted file mode 100644 index 0b06ed77..00000000 --- a/vendor/golang.org/x/net/html/example_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This example demonstrates parsing HTML data and walking the resulting tree. -package html_test - -import ( - "fmt" - "log" - "strings" - - "golang.org/x/net/html" -) - -func ExampleParse() { - s := `

`<p>Links:</p><ul><li><a href="foo">Foo</a><li><a href="/bar/baz">BarBaz</a></ul>
` - doc, err := html.Parse(strings.NewReader(s)) - if err != nil { - log.Fatal(err) - } - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - for _, a := range n.Attr { - if a.Key == "href" { - fmt.Println(a.Val) - break - } - } - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(doc) - // Output: - // foo - // /bar/baz -} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go deleted file mode 100644 index d3b38440..00000000 --- a/vendor/golang.org/x/net/html/foreign.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "strings" -) - -func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { - for i := range aa { - if newName, ok := nameMap[aa[i].Key]; ok { - aa[i].Key = newName - } - } -} - -func adjustForeignAttributes(aa []Attribute) { - for i, a := range aa { - if a.Key == "" || a.Key[0] != 'x' { - continue - } - switch a.Key { - case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", - "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": - j := strings.Index(a.Key, ":") - aa[i].Namespace = a.Key[:j] - aa[i].Key = a.Key[j+1:] - } - } -} - -func htmlIntegrationPoint(n *Node) bool { - if n.Type != ElementNode { - return false - } - switch n.Namespace { - case "math": - if n.Data == "annotation-xml" { - for _, a := range n.Attr { - if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { - return true - } - } - } - } - case "svg": - switch n.Data { - case "desc", "foreignObject", "title": - return true - } - } - return false -} - -func mathMLTextIntegrationPoint(n *Node) bool { - if n.Namespace != "math" { - return false - } - switch n.Data { - case "mi", "mo", "mn", "ms", "mtext": - return true - } - return false -} - -// Section 12.2.5.5. -var breakout = map[string]bool{ - "b": true, - "big": true, - "blockquote": true, - "body": true, - "br": true, - "center": true, - "code": true, - "dd": true, - "div": true, - "dl": true, - "dt": true, - "em": true, - "embed": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "hr": true, - "i": true, - "img": true, - "li": true, - "listing": true, - "menu": true, - "meta": true, - "nobr": true, - "ol": true, - "p": true, - "pre": true, - "ruby": true, - "s": true, - "small": true, - "span": true, - "strong": true, - "strike": true, - "sub": true, - "sup": true, - "table": true, - "tt": true, - "u": true, - "ul": true, - "var": true, -} - -// Section 12.2.5.5. 
-var svgTagNameAdjustments = map[string]string{ - "altglyph": "altGlyph", - "altglyphdef": "altGlyphDef", - "altglyphitem": "altGlyphItem", - "animatecolor": "animateColor", - "animatemotion": "animateMotion", - "animatetransform": "animateTransform", - "clippath": "clipPath", - "feblend": "feBlend", - "fecolormatrix": "feColorMatrix", - "fecomponenttransfer": "feComponentTransfer", - "fecomposite": "feComposite", - "feconvolvematrix": "feConvolveMatrix", - "fediffuselighting": "feDiffuseLighting", - "fedisplacementmap": "feDisplacementMap", - "fedistantlight": "feDistantLight", - "feflood": "feFlood", - "fefunca": "feFuncA", - "fefuncb": "feFuncB", - "fefuncg": "feFuncG", - "fefuncr": "feFuncR", - "fegaussianblur": "feGaussianBlur", - "feimage": "feImage", - "femerge": "feMerge", - "femergenode": "feMergeNode", - "femorphology": "feMorphology", - "feoffset": "feOffset", - "fepointlight": "fePointLight", - "fespecularlighting": "feSpecularLighting", - "fespotlight": "feSpotLight", - "fetile": "feTile", - "feturbulence": "feTurbulence", - "foreignobject": "foreignObject", - "glyphref": "glyphRef", - "lineargradient": "linearGradient", - "radialgradient": "radialGradient", - "textpath": "textPath", -} - -// Section 12.2.5.1 -var mathMLAttributeAdjustments = map[string]string{ - "definitionurl": "definitionURL", -} - -var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", -} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go deleted file mode 100644 index 26b657ae..00000000 --- a/vendor/golang.org/x/net/html/node.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "golang.org/x/net/html/atom" -) - -// A NodeType is the type of a Node. -type NodeType uint32 - -const ( - ErrorNode NodeType = iota - TextNode - DocumentNode - ElementNode - CommentNode - DoctypeNode - scopeMarkerNode -) - -// Section 12.2.3.3 says "scope markers are inserted when entering applet -// elements, buttons, object elements, marquees, table cells, and table -// captions, and are used to prevent formatting from 'leaking'". -var scopeMarker = Node{Type: scopeMarkerNode} - -// A Node consists of a NodeType and some Data (tag name for element nodes, -// content for text) and are part of a tree of Nodes. Element nodes may also -// have a Namespace and contain a slice of Attributes. Data is unescaped, so -// that it looks like "a 0 { - return (*s)[i-1] - } - return nil -} - -// index returns the index of the top-most occurrence of n in the stack, or -1 -// if n is not present. -func (s *nodeStack) index(n *Node) int { - for i := len(*s) - 1; i >= 0; i-- { - if (*s)[i] == n { - return i - } - } - return -1 -} - -// insert inserts a node at the given index. -func (s *nodeStack) insert(i int, n *Node) { - (*s) = append(*s, nil) - copy((*s)[i+1:], (*s)[i:]) - (*s)[i] = n -} - -// remove removes a node from the stack. It is a no-op if n is not present. -func (s *nodeStack) remove(n *Node) { - i := s.index(n) - if i == -1 { - return - } - copy((*s)[i:], (*s)[i+1:]) - j := len(*s) - 1 - (*s)[j] = nil - *s = (*s)[:j] -} diff --git a/vendor/golang.org/x/net/html/node_test.go b/vendor/golang.org/x/net/html/node_test.go deleted file mode 100644 index 471102f3..00000000 --- a/vendor/golang.org/x/net/html/node_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "fmt" -) - -// checkTreeConsistency checks that a node and its descendants are all -// consistent in their parent/child/sibling relationships. -func checkTreeConsistency(n *Node) error { - return checkTreeConsistency1(n, 0) -} - -func checkTreeConsistency1(n *Node, depth int) error { - if depth == 1e4 { - return fmt.Errorf("html: tree looks like it contains a cycle") - } - if err := checkNodeConsistency(n); err != nil { - return err - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - if err := checkTreeConsistency1(c, depth+1); err != nil { - return err - } - } - return nil -} - -// checkNodeConsistency checks that a node's parent/child/sibling relationships -// are consistent. 
-func checkNodeConsistency(n *Node) error { - if n == nil { - return nil - } - - nParent := 0 - for p := n.Parent; p != nil; p = p.Parent { - nParent++ - if nParent == 1e4 { - return fmt.Errorf("html: parent list looks like an infinite loop") - } - } - - nForward := 0 - for c := n.FirstChild; c != nil; c = c.NextSibling { - nForward++ - if nForward == 1e6 { - return fmt.Errorf("html: forward list of children looks like an infinite loop") - } - if c.Parent != n { - return fmt.Errorf("html: inconsistent child/parent relationship") - } - } - - nBackward := 0 - for c := n.LastChild; c != nil; c = c.PrevSibling { - nBackward++ - if nBackward == 1e6 { - return fmt.Errorf("html: backward list of children looks like an infinite loop") - } - if c.Parent != n { - return fmt.Errorf("html: inconsistent child/parent relationship") - } - } - - if n.Parent != nil { - if n.Parent == n { - return fmt.Errorf("html: inconsistent parent relationship") - } - if n.Parent == n.FirstChild { - return fmt.Errorf("html: inconsistent parent/first relationship") - } - if n.Parent == n.LastChild { - return fmt.Errorf("html: inconsistent parent/last relationship") - } - if n.Parent == n.PrevSibling { - return fmt.Errorf("html: inconsistent parent/prev relationship") - } - if n.Parent == n.NextSibling { - return fmt.Errorf("html: inconsistent parent/next relationship") - } - - parentHasNAsAChild := false - for c := n.Parent.FirstChild; c != nil; c = c.NextSibling { - if c == n { - parentHasNAsAChild = true - break - } - } - if !parentHasNAsAChild { - return fmt.Errorf("html: inconsistent parent/child relationship") - } - } - - if n.PrevSibling != nil && n.PrevSibling.NextSibling != n { - return fmt.Errorf("html: inconsistent prev/next relationship") - } - if n.NextSibling != nil && n.NextSibling.PrevSibling != n { - return fmt.Errorf("html: inconsistent next/prev relationship") - } - - if (n.FirstChild == nil) != (n.LastChild == nil) { - return fmt.Errorf("html: inconsistent first/last relationship") - } - if n.FirstChild != nil && n.FirstChild == n.LastChild { - // We have a sole child. - if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil { - return fmt.Errorf("html: inconsistent sole child's sibling relationship") - } - } - - seen := map[*Node]bool{} - - var last *Node - for c := n.FirstChild; c != nil; c = c.NextSibling { - if seen[c] { - return fmt.Errorf("html: inconsistent repeated child") - } - seen[c] = true - last = c - } - if last != n.LastChild { - return fmt.Errorf("html: inconsistent last relationship") - } - - var first *Node - for c := n.LastChild; c != nil; c = c.PrevSibling { - if !seen[c] { - return fmt.Errorf("html: inconsistent missing child") - } - delete(seen, c) - first = c - } - if first != n.FirstChild { - return fmt.Errorf("html: inconsistent first relationship") - } - - if len(seen) != 0 { - return fmt.Errorf("html: inconsistent forwards/backwards child list") - } - - return nil -} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go deleted file mode 100644 index be4b2bf5..00000000 --- a/vendor/golang.org/x/net/html/parse.go +++ /dev/null @@ -1,2094 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package html - -import ( - "errors" - "fmt" - "io" - "strings" - - a "golang.org/x/net/html/atom" -) - -// A parser implements the HTML5 parsing algorithm: -// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction -type parser struct { - // tokenizer provides the tokens for the parser. - tokenizer *Tokenizer - // tok is the most recently read token. - tok Token - // Self-closing tags like
are treated as start tags, except that - // hasSelfClosingToken is set while they are being processed. - hasSelfClosingToken bool - // doc is the document root element. - doc *Node - // The stack of open elements (section 12.2.3.2) and active formatting - // elements (section 12.2.3.3). - oe, afe nodeStack - // Element pointers (section 12.2.3.4). - head, form *Node - // Other parsing state flags (section 12.2.3.5). - scripting, framesetOK bool - // im is the current insertion mode. - im insertionMode - // originalIM is the insertion mode to go back to after completing a text - // or inTableText insertion mode. - originalIM insertionMode - // fosterParenting is whether new elements should be inserted according to - // the foster parenting rules (section 12.2.5.3). - fosterParenting bool - // quirks is whether the parser is operating in "quirks mode." - quirks bool - // fragment is whether the parser is parsing an HTML fragment. - fragment bool - // context is the context element when parsing an HTML fragment - // (section 12.4). - context *Node -} - -func (p *parser) top() *Node { - if n := p.oe.top(); n != nil { - return n - } - return p.doc -} - -// Stop tags for use in popUntil. These come from section 12.2.3.2. -var ( - defaultScopeStopTags = map[string][]a.Atom{ - "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, - "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, - "svg": {a.Desc, a.ForeignObject, a.Title}, - } -) - -type scope int - -const ( - defaultScope scope = iota - listItemScope - buttonScope - tableScope - tableRowScope - tableBodyScope - selectScope -) - -// popUntil pops the stack of open elements at the highest element whose tag -// is in matchTags, provided there is no higher element in the scope's stop -// tags (as defined in section 12.2.3.2). It returns whether or not there was -// such an element. If there was not, popUntil leaves the stack unchanged. -// -// For example, the set of stop tags for table scope is: "html", "table". If -// the stack was: -// ["html", "body", "font", "table", "b", "i", "u"] -// then popUntil(tableScope, "font") would return false, but -// popUntil(tableScope, "i") would return true and the stack would become: -// ["html", "body", "font", "table", "b"] -// -// If an element's tag is in both the stop tags and matchTags, then the stack -// will be popped and the function returns true (provided, of course, there was -// no higher element in the stack that was also in the stop tags). For example, -// popUntil(tableScope, "table") returns true and leaves: -// ["html", "body", "font"] -func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { - if i := p.indexOfElementInScope(s, matchTags...); i != -1 { - p.oe = p.oe[:i] - return true - } - return false -} - -// indexOfElementInScope returns the index in p.oe of the highest element whose -// tag is in matchTags that is in scope. If no matching element is in scope, it -// returns -1. -func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - if p.oe[i].Namespace == "" { - for _, t := range matchTags { - if t == tagAtom { - return i - } - } - switch s { - case defaultScope: - // No-op. 
- case listItemScope: - if tagAtom == a.Ol || tagAtom == a.Ul { - return -1 - } - case buttonScope: - if tagAtom == a.Button { - return -1 - } - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { - return -1 - } - case selectScope: - if tagAtom != a.Optgroup && tagAtom != a.Option { - return -1 - } - default: - panic("unreachable") - } - } - switch s { - case defaultScope, listItemScope, buttonScope: - for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { - if t == tagAtom { - return -1 - } - } - } - } - return -1 -} - -// elementInScope is like popUntil, except that it doesn't modify the stack of -// open elements. -func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { - return p.indexOfElementInScope(s, matchTags...) != -1 -} - -// clearStackToContext pops elements off the stack of open elements until a -// scope-defined element is found. -func (p *parser) clearStackToContext(s scope) { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - switch s { - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { - p.oe = p.oe[:i+1] - return - } - case tableRowScope: - if tagAtom == a.Html || tagAtom == a.Tr { - p.oe = p.oe[:i+1] - return - } - case tableBodyScope: - if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { - p.oe = p.oe[:i+1] - return - } - default: - panic("unreachable") - } - } -} - -// generateImpliedEndTags pops nodes off the stack of open elements as long as -// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. -// If exceptions are specified, nodes with that name will not be popped off. -func (p *parser) generateImpliedEndTags(exceptions ...string) { - var i int -loop: - for i = len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: - for _, except := range exceptions { - if n.Data == except { - break loop - } - } - continue - } - } - break - } - - p.oe = p.oe[:i+1] -} - -// addChild adds a child node n to the top element, and pushes n onto the stack -// of open elements if it is an element node. -func (p *parser) addChild(n *Node) { - if p.shouldFosterParent() { - p.fosterParent(n) - } else { - p.top().AppendChild(n) - } - - if n.Type == ElementNode { - p.oe = append(p.oe, n) - } -} - -// shouldFosterParent returns whether the next node to be added should be -// foster parented. -func (p *parser) shouldFosterParent() bool { - if p.fosterParenting { - switch p.top().DataAtom { - case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: - return true - } - } - return false -} - -// fosterParent adds a child node according to the foster parenting rules. -// Section 12.2.5.3, "foster parenting". -func (p *parser) fosterParent(n *Node) { - var table, parent, prev *Node - var i int - for i = len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].DataAtom == a.Table { - table = p.oe[i] - break - } - } - - if table == nil { - // The foster parent is the html element. - parent = p.oe[0] - } else { - parent = table.Parent - } - if parent == nil { - parent = p.oe[i-1] - } - - if table != nil { - prev = table.PrevSibling - } else { - prev = parent.LastChild - } - if prev != nil && prev.Type == TextNode && n.Type == TextNode { - prev.Data += n.Data - return - } - - parent.InsertBefore(n, table) -} - -// addText adds text to the preceding node if it is a text node, or else it -// calls addChild with a new text node. 
-func (p *parser) addText(text string) { - if text == "" { - return - } - - if p.shouldFosterParent() { - p.fosterParent(&Node{ - Type: TextNode, - Data: text, - }) - return - } - - t := p.top() - if n := t.LastChild; n != nil && n.Type == TextNode { - n.Data += text - return - } - p.addChild(&Node{ - Type: TextNode, - Data: text, - }) -} - -// addElement adds a child element based on the current token. -func (p *parser) addElement() { - p.addChild(&Node{ - Type: ElementNode, - DataAtom: p.tok.DataAtom, - Data: p.tok.Data, - Attr: p.tok.Attr, - }) -} - -// Section 12.2.3.3. -func (p *parser) addFormattingElement() { - tagAtom, attr := p.tok.DataAtom, p.tok.Attr - p.addElement() - - // Implement the Noah's Ark clause, but with three per family instead of two. - identicalElements := 0 -findIdenticalElements: - for i := len(p.afe) - 1; i >= 0; i-- { - n := p.afe[i] - if n.Type == scopeMarkerNode { - break - } - if n.Type != ElementNode { - continue - } - if n.Namespace != "" { - continue - } - if n.DataAtom != tagAtom { - continue - } - if len(n.Attr) != len(attr) { - continue - } - compareAttributes: - for _, t0 := range n.Attr { - for _, t1 := range attr { - if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { - // Found a match for this attribute, continue with the next attribute. - continue compareAttributes - } - } - // If we get here, there is no attribute that matches a. - // Therefore the element is not identical to the new one. - continue findIdenticalElements - } - - identicalElements++ - if identicalElements >= 3 { - p.afe.remove(n) - } - } - - p.afe = append(p.afe, p.top()) -} - -// Section 12.2.3.3. -func (p *parser) clearActiveFormattingElements() { - for { - n := p.afe.pop() - if len(p.afe) == 0 || n.Type == scopeMarkerNode { - return - } - } -} - -// Section 12.2.3.3. -func (p *parser) reconstructActiveFormattingElements() { - n := p.afe.top() - if n == nil { - return - } - if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { - return - } - i := len(p.afe) - 1 - for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { - if i == 0 { - i = -1 - break - } - i-- - n = p.afe[i] - } - for { - i++ - clone := p.afe[i].clone() - p.addChild(clone) - p.afe[i] = clone - if i == len(p.afe)-1 { - break - } - } -} - -// Section 12.2.4. -func (p *parser) acknowledgeSelfClosingTag() { - p.hasSelfClosingToken = false -} - -// An insertion mode (section 12.2.3.1) is the state transition function from -// a particular state in the HTML5 parser's state machine. It updates the -// parser's fields depending on parser.tok (where ErrorToken means EOF). -// It returns whether the token was consumed. -type insertionMode func(*parser) bool - -// setOriginalIM sets the insertion mode to return to after completing a text or -// inTableText insertion mode. -// Section 12.2.3.1, "using the rules for". -func (p *parser) setOriginalIM() { - if p.originalIM != nil { - panic("html: bad parser state: originalIM was set twice") - } - p.originalIM = p.im -} - -// Section 12.2.3.1, "reset the insertion mode". 
-func (p *parser) resetInsertionMode() { - for i := len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if i == 0 && p.context != nil { - n = p.context - } - - switch n.DataAtom { - case a.Select: - p.im = inSelectIM - case a.Td, a.Th: - p.im = inCellIM - case a.Tr: - p.im = inRowIM - case a.Tbody, a.Thead, a.Tfoot: - p.im = inTableBodyIM - case a.Caption: - p.im = inCaptionIM - case a.Colgroup: - p.im = inColumnGroupIM - case a.Table: - p.im = inTableIM - case a.Head: - p.im = inBodyIM - case a.Body: - p.im = inBodyIM - case a.Frameset: - p.im = inFramesetIM - case a.Html: - p.im = beforeHeadIM - default: - continue - } - return - } - p.im = inBodyIM -} - -const whitespace = " \t\r\n\f" - -// Section 12.2.5.4.1. -func initialIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - n, quirks := parseDoctype(p.tok.Data) - p.doc.AppendChild(n) - p.quirks = quirks - p.im = beforeHTMLIM - return true - } - p.quirks = true - p.im = beforeHTMLIM - return false -} - -// Section 12.2.5.4.2. -func beforeHTMLIM(p *parser) bool { - switch p.tok.Type { - case DoctypeToken: - // Ignore the token. - return true - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - if p.tok.DataAtom == a.Html { - p.addElement() - p.im = beforeHeadIM - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - } - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false -} - -// Section 12.2.5.4.3. -func beforeHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Head: - p.addElement() - p.head = p.top() - p.im = inHeadIM - return true - case a.Html: - return inBodyIM(p) - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.5.4.4. -func inHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. 
- p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: - p.addElement() - p.oe.pop() - p.acknowledgeSelfClosingTag() - return true - case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: - p.addElement() - p.setOriginalIM() - p.im = textIM - return true - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head: - n := p.oe.pop() - if n.DataAtom != a.Head { - panic("html: bad parser state: element not found, in the in-head insertion mode") - } - p.im = afterHeadIM - return true - case a.Body, a.Html, a.Br: - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.5.4.6. -func afterHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. - p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Body: - p.addElement() - p.framesetOK = false - p.im = inBodyIM - return true - case a.Frameset: - p.addElement() - p.im = inFramesetIM - return true - case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: - p.oe = append(p.oe, p.head) - defer p.oe.remove(p.head) - return inHeadIM(p) - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Body, a.Html, a.Br: - // Drop down to creating an implied tag. - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) - p.framesetOK = true - return false -} - -// copyAttributes copies attributes of src not found on dst to dst. -func copyAttributes(dst *Node, src Token) { - if len(src.Attr) == 0 { - return - } - attr := map[string]string{} - for _, t := range dst.Attr { - attr[t.Key] = t.Val - } - for _, t := range src.Attr { - if _, ok := attr[t.Key]; !ok { - dst.Attr = append(dst.Attr, t) - attr[t.Key] = t.Val - } - } -} - -// Section 12.2.5.4.7. -func inBodyIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - d := p.tok.Data - switch n := p.oe.top(); n.DataAtom { - case a.Pre, a.Listing: - if n.FirstChild == nil { - // Ignore a newline at the start of a
<pre> block.
-				if d != "" && d[0] == '\r' {
-					d = d[1:]
-				}
-				if d != "" && d[0] == '\n' {
-					d = d[1:]
-				}
-			}
-		}
-		d = strings.Replace(d, "\x00", "", -1)
-		if d == "" {
-			return true
-		}
-		p.reconstructActiveFormattingElements()
-		p.addText(d)
-		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
-			// There were non-whitespace characters inserted.
-			p.framesetOK = false
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			copyAttributes(p.oe[0], p.tok)
-		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
-			return inHeadIM(p)
-		case a.Body:
-			if len(p.oe) >= 2 {
-				body := p.oe[1]
-				if body.Type == ElementNode && body.DataAtom == a.Body {
-					p.framesetOK = false
-					copyAttributes(body, p.tok)
-				}
-			}
-		case a.Frameset:
-			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
-				// Ignore the token.
-				return true
-			}
-			body := p.oe[1]
-			if body.Parent != nil {
-				body.Parent.RemoveChild(body)
-			}
-			p.oe = p.oe[:1]
-			p.addElement()
-			p.im = inFramesetIM
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(buttonScope, a.P)
-			switch n := p.top(); n.DataAtom {
-			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Pre, a.Listing:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			// The newline, if any, will be dealt with by the TextToken case.
-			p.framesetOK = false
-		case a.Form:
-			if p.form == nil {
-				p.popUntil(buttonScope, a.P)
-				p.addElement()
-				p.form = p.top()
-			}
-		case a.Li:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Li:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Dd, a.Dt:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Dd, a.Dt:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Plaintext:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Button:
-			p.popUntil(defaultScope, a.Button)
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-		case a.A:
-			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
-				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
-					p.inBodyEndTagFormatting(a.A)
-					p.oe.remove(n)
-					p.afe.remove(n)
-					break
-				}
-			}
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.Nobr:
-			p.reconstructActiveFormattingElements()
-			if p.elementInScope(defaultScope, a.Nobr) {
-				p.inBodyEndTagFormatting(a.Nobr)
-				p.reconstructActiveFormattingElements()
-			}
-			p.addFormattingElement()
-		case a.Applet, a.Marquee, a.Object:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.framesetOK = false
-		case a.Table:
-			if !p.quirks {
-				p.popUntil(buttonScope, a.P)
-			}
-			p.addElement()
-			p.framesetOK = false
-			p.im = inTableIM
-			return true
-		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			if p.tok.DataAtom == a.Input {
-				for _, t := range p.tok.Attr {
-					if t.Key == "type" {
-						if strings.ToLower(t.Val) == "hidden" {
-							// Skip setting framesetOK = false
-							return true
-						}
-					}
-				}
-			}
-			p.framesetOK = false
-		case a.Param, a.Source, a.Track:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		case a.Hr:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			p.framesetOK = false
-		case a.Image:
-			p.tok.DataAtom = a.Img
-			p.tok.Data = a.Img.String()
-			return false
-		case a.Isindex:
-			if p.form != nil {
-				// Ignore the token.
-				return true
-			}
-			action := ""
-			prompt := "This is a searchable index. Enter search keywords: "
-			attr := []Attribute{{Key: "name", Val: "isindex"}}
-			for _, t := range p.tok.Attr {
-				switch t.Key {
-				case "action":
-					action = t.Val
-				case "name":
-					// Ignore the attribute.
-				case "prompt":
-					prompt = t.Val
-				default:
-					attr = append(attr, t)
-				}
-			}
-			p.acknowledgeSelfClosingTag()
-			p.popUntil(buttonScope, a.P)
-			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
-			if action != "" {
-				p.form.Attr = []Attribute{{Key: "action", Val: action}}
-			}
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
-			p.addText(prompt)
-			p.addChild(&Node{
-				Type:     ElementNode,
-				DataAtom: a.Input,
-				Data:     a.Input.String(),
-				Attr:     attr,
-			})
-			p.oe.pop()
-			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
-		case a.Textarea:
-			p.addElement()
-			p.setOriginalIM()
-			p.framesetOK = false
-			p.im = textIM
-		case a.Xmp:
-			p.popUntil(buttonScope, a.P)
-			p.reconstructActiveFormattingElements()
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Iframe:
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Noembed, a.Noscript:
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectIM
-			return true
-		case a.Optgroup, a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		case a.Rp, a.Rt:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags()
-			}
-			p.addElement()
-		case a.Math, a.Svg:
-			p.reconstructActiveFormattingElements()
-			if p.tok.DataAtom == a.Math {
-				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
-			} else {
-				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
-			}
-			adjustForeignAttributes(p.tok.Attr)
-			p.addElement()
-			p.top().Namespace = p.tok.Data
-			if p.hasSelfClosingToken {
-				p.oe.pop()
-				p.acknowledgeSelfClosingTag()
-			}
-			return true
-		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-		default:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Body:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.im = afterBodyIM
-			}
-		case a.Html:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
-				return false
-			}
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.Form:
-			node := p.form
-			p.form = nil
-			i := p.indexOfElementInScope(defaultScope, a.Form)
-			if node == nil || i == -1 || p.oe[i] != node {
-				// Ignore the token.
-				return true
-			}
-			p.generateImpliedEndTags()
-			p.oe.remove(node)
-		case a.P:
-			if !p.elementInScope(buttonScope, a.P) {
-				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
-			}
-			p.popUntil(buttonScope, a.P)
-		case a.Li:
-			p.popUntil(listItemScope, a.Li)
-		case a.Dd, a.Dt:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
-		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.inBodyEndTagFormatting(p.tok.DataAtom)
-		case a.Applet, a.Marquee, a.Object:
-			if p.popUntil(defaultScope, p.tok.DataAtom) {
-				p.clearActiveFormattingElements()
-			}
-		case a.Br:
-			p.tok.Type = StartTagToken
-			return false
-		default:
-			p.inBodyEndTagOther(p.tok.DataAtom)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	}
-
-	return true
-}
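
An illustrative aside, not part of this patch: one visible effect of the in-body insertion mode removed above (the a.Table case together with fosterParent) is that stray text inside a <table> is "foster parented" out in front of the table rather than dropped. A minimal sketch against the package's exported API, assuming the now module-managed golang.org/x/net/html dependency; the input string is made up for illustration:

    package main

    import (
    	"log"
    	"os"
    	"strings"

    	"golang.org/x/net/html"
    )

    func main() {
    	// "lost" may not appear directly inside <table>, so the parser moves it
    	// in front of the table (foster parenting); implied elements such as
    	// <tbody> and the missing end tags are supplied as well.
    	doc, err := html.Parse(strings.NewReader(`<table>lost<tr><td>kept`))
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := html.Render(os.Stdout, doc); err != nil {
    		log.Fatal(err)
    	}
    }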
-
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
-	// This is the "adoption agency" algorithm, described at
-	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
-
-	// TODO: this is a fairly literal line-by-line translation of that algorithm.
-	// Once the code successfully parses the comprehensive test suite, we should
-	// refactor this code to be more idiomatic.
-
-	// Steps 1-4. The outer loop.
-	for i := 0; i < 8; i++ {
-		// Step 5. Find the formatting element.
-		var formattingElement *Node
-		for j := len(p.afe) - 1; j >= 0; j-- {
-			if p.afe[j].Type == scopeMarkerNode {
-				break
-			}
-			if p.afe[j].DataAtom == tagAtom {
-				formattingElement = p.afe[j]
-				break
-			}
-		}
-		if formattingElement == nil {
-			p.inBodyEndTagOther(tagAtom)
-			return
-		}
-		feIndex := p.oe.index(formattingElement)
-		if feIndex == -1 {
-			p.afe.remove(formattingElement)
-			return
-		}
-		if !p.elementInScope(defaultScope, tagAtom) {
-			// Ignore the tag.
-			return
-		}
-
-		// Steps 9-10. Find the furthest block.
-		var furthestBlock *Node
-		for _, e := range p.oe[feIndex:] {
-			if isSpecialElement(e) {
-				furthestBlock = e
-				break
-			}
-		}
-		if furthestBlock == nil {
-			e := p.oe.pop()
-			for e != formattingElement {
-				e = p.oe.pop()
-			}
-			p.afe.remove(e)
-			return
-		}
-
-		// Steps 11-12. Find the common ancestor and bookmark node.
-		commonAncestor := p.oe[feIndex-1]
-		bookmark := p.afe.index(formattingElement)
-
-		// Step 13. The inner loop. Find the lastNode to reparent.
-		lastNode := furthestBlock
-		node := furthestBlock
-		x := p.oe.index(node)
-		// Steps 13.1-13.2
-		for j := 0; j < 3; j++ {
-			// Step 13.3.
-			x--
-			node = p.oe[x]
-			// Step 13.4 - 13.5.
-			if p.afe.index(node) == -1 {
-				p.oe.remove(node)
-				continue
-			}
-			// Step 13.6.
-			if node == formattingElement {
-				break
-			}
-			// Step 13.7.
-			clone := node.clone()
-			p.afe[p.afe.index(node)] = clone
-			p.oe[p.oe.index(node)] = clone
-			node = clone
-			// Step 13.8.
-			if lastNode == furthestBlock {
-				bookmark = p.afe.index(node) + 1
-			}
-			// Step 13.9.
-			if lastNode.Parent != nil {
-				lastNode.Parent.RemoveChild(lastNode)
-			}
-			node.AppendChild(lastNode)
-			// Step 13.10.
-			lastNode = node
-		}
-
-		// Step 14. Reparent lastNode to the common ancestor,
-		// or for misnested table nodes, to the foster parent.
-		if lastNode.Parent != nil {
-			lastNode.Parent.RemoveChild(lastNode)
-		}
-		switch commonAncestor.DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			p.fosterParent(lastNode)
-		default:
-			commonAncestor.AppendChild(lastNode)
-		}
-
-		// Steps 15-17. Reparent nodes from the furthest block's children
-		// to a clone of the formatting element.
-		clone := formattingElement.clone()
-		reparentChildren(clone, furthestBlock)
-		furthestBlock.AppendChild(clone)
-
-		// Step 18. Fix up the list of active formatting elements.
-		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
-			// Move the bookmark with the rest of the list.
-			bookmark--
-		}
-		p.afe.remove(formattingElement)
-		p.afe.insert(bookmark, clone)
-
-		// Step 19. Fix up the stack of open elements.
-		p.oe.remove(formattingElement)
-		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
-	}
-}
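
Another aside for context, not from the patch: inBodyEndTagFormatting above is the "adoption agency" algorithm, which repairs misnested formatting tags instead of rejecting them. A small sketch, again assuming the golang.org/x/net/html import; the input is illustrative and the exact serialization may vary slightly between package versions:

    package main

    import (
    	"log"
    	"os"
    	"strings"

    	"golang.org/x/net/html"
    )

    func main() {
    	// </b> arrives while <i> is still open; the adoption agency algorithm
    	// clones and re-parents the formatting elements so that "4" ends up
    	// inside <i> but no longer inside <b>.
    	const in = `<p>1<b>2<i>3</b>4</i>5</p>`
    	doc, err := html.Parse(strings.NewReader(in))
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := html.Render(os.Stdout, doc); err != nil {
    		log.Fatal(err)
    	}
    }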
-
-// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
-// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content
-// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		if p.oe[i].DataAtom == tagAtom {
-			p.oe = p.oe[:i]
-			break
-		}
-		if isSpecialElement(p.oe[i]) {
-			break
-		}
-	}
-}
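
The parser's fragment and context fields mentioned earlier are driven by the exported ParseFragment function. A hedged sketch, not part of the patch, of parsing a table-row fragment with an assumed <tbody> context element:

    package main

    import (
    	"log"
    	"os"
    	"strings"

    	"golang.org/x/net/html"
    	"golang.org/x/net/html/atom"
    )

    func main() {
    	// Fragment parsing needs a context element; the rows below are parsed
    	// as if they were the InnerHTML of an existing <tbody>.
    	ctx := &html.Node{
    		Type:     html.ElementNode,
    		DataAtom: atom.Tbody,
    		Data:     "tbody",
    	}
    	nodes, err := html.ParseFragment(strings.NewReader(`<tr><td>a</td><td>b</td></tr>`), ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, n := range nodes {
    		if err := html.Render(os.Stdout, n); err != nil {
    			log.Fatal(err)
    		}
    	}
    }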
-
-// Section 12.2.5.4.8.
-func textIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		p.oe.pop()
-	case TextToken:
-		d := p.tok.Data
-		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
-			// Ignore a newline at the start of a -->
-#errors
-#document
-| 
-|   
-|   
-|     -->
-#errors
-#document
-| 
-|   
-|   
-|     
-#errors
-Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
-#document
-| 
-|   
-|   
-|     
-#errors
-Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
-Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
-Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
-Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
-Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
-Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
-Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
-Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
-Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
-Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
-Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
-Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
-Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
-Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
-Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
-Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
-Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
-Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
-Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
-Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
-Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
-Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
-Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
-Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
-Line: 1 Col: 140 This element (img) has no end tag.
-Line: 1 Col: 148 Unexpected end tag (title). Ignored.
-Line: 1 Col: 155 Unexpected end tag (span). Ignored.
-Line: 1 Col: 163 Unexpected end tag (style). Ignored.
-Line: 1 Col: 172 Unexpected end tag (script). Ignored.
-Line: 1 Col: 180 Unexpected end tag (table). Ignored.
-Line: 1 Col: 185 Unexpected end tag (th). Ignored.
-Line: 1 Col: 190 Unexpected end tag (td). Ignored.
-Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
-Line: 1 Col: 203 This element (frame) has no end tag.
-Line: 1 Col: 210 This element (area) has no end tag.
-Line: 1 Col: 217 Unexpected end tag (link). Ignored.
-Line: 1 Col: 225 This element (param) has no end tag.
-Line: 1 Col: 230 This element (hr) has no end tag.
-Line: 1 Col: 238 This element (input) has no end tag.
-Line: 1 Col: 244 Unexpected end tag (col). Ignored.
-Line: 1 Col: 251 Unexpected end tag (base). Ignored.
-Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
-Line: 1 Col: 269 This element (basefont) has no end tag.
-Line: 1 Col: 279 This element (bgsound) has no end tag.
-Line: 1 Col: 287 This element (embed) has no end tag.
-Line: 1 Col: 296 This element (spacer) has no end tag.
-Line: 1 Col: 300 Unexpected end tag (p). Ignored.
-Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
-Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
-Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
-Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
-Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
-Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
-Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
-Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
-Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
-Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
-Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
-Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
-Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
-Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
-Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
-Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
-Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
-Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
-Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
-Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
-Line: 1 Col: 460 This element (wbr) has no end tag.
-Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
-Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
-Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
-Line: 1 Col: 513 Unexpected end tag (html). Ignored.
-Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
-Line: 1 Col: 520 Unexpected end tag (head). Ignored.
-Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
-Line: 1 Col: 537 This element (image) has no end tag.
-Line: 1 Col: 547 This element (isindex) has no end tag.
-Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
-Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
-Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
-Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
-Line: 1 Col: 599 Unexpected end tag (option). Ignored.
-Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
-Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
-#document
-| 
-|   
-|   
-|     
-|

- -#data -

-#errors -Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. -Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode. -Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode. -Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode. -Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode. -Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode. -Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode. -Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode. -Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode. -Line: 1 Col: 58 Unexpected end tag (blink). Ignored. -Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode. -Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode. -Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag. -Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode. -Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode. -Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode. -Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode. -Line: 1 Col: 99 Unexpected end tag (select). Ignored. -Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode. -Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag. -Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode. -Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag. -Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode. -Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag. -Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode. -Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag. -Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode. -Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag. -Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode. -Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag. -Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored. -Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode. -Line: 1 Col: 141 Unexpected end tag (br). Treated as br element. 
-Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode. -Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode. -Line: 1 Col: 151 This element (img) has no end tag. -Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode. -Line: 1 Col: 159 Unexpected end tag (title). Ignored. -Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode. -Line: 1 Col: 166 Unexpected end tag (span). Ignored. -Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode. -Line: 1 Col: 174 Unexpected end tag (style). Ignored. -Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode. -Line: 1 Col: 183 Unexpected end tag (script). Ignored. -Line: 1 Col: 196 Unexpected end tag (th). Ignored. -Line: 1 Col: 201 Unexpected end tag (td). Ignored. -Line: 1 Col: 206 Unexpected end tag (tr). Ignored. -Line: 1 Col: 214 This element (frame) has no end tag. -Line: 1 Col: 221 This element (area) has no end tag. -Line: 1 Col: 228 Unexpected end tag (link). Ignored. -Line: 1 Col: 236 This element (param) has no end tag. -Line: 1 Col: 241 This element (hr) has no end tag. -Line: 1 Col: 249 This element (input) has no end tag. -Line: 1 Col: 255 Unexpected end tag (col). Ignored. -Line: 1 Col: 262 Unexpected end tag (base). Ignored. -Line: 1 Col: 269 Unexpected end tag (meta). Ignored. -Line: 1 Col: 280 This element (basefont) has no end tag. -Line: 1 Col: 290 This element (bgsound) has no end tag. -Line: 1 Col: 298 This element (embed) has no end tag. -Line: 1 Col: 307 This element (spacer) has no end tag. -Line: 1 Col: 311 Unexpected end tag (p). Ignored. -Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag. -Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag. -Line: 1 Col: 331 Unexpected end tag (caption). Ignored. -Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored. -Line: 1 Col: 350 Unexpected end tag (tbody). Ignored. -Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored. -Line: 1 Col: 366 Unexpected end tag (thead). Ignored. -Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag. -Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag. -Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag. -Line: 1 Col: 404 Unexpected end tag (dir). Ignored. -Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag. -Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag. -Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag. -Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag. -Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag. -Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag. -Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag. -Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag. -Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. -Line: 1 Col: 471 This element (wbr) has no end tag. -Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag. -Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag. -Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag. -Line: 1 Col: 524 Unexpected end tag (html). Ignored. -Line: 1 Col: 524 Unexpected end tag (frameset). Ignored. 
-Line: 1 Col: 531 Unexpected end tag (head). Ignored. -Line: 1 Col: 540 Unexpected end tag (iframe). Ignored. -Line: 1 Col: 548 This element (image) has no end tag. -Line: 1 Col: 558 This element (isindex) has no end tag. -Line: 1 Col: 568 Unexpected end tag (noembed). Ignored. -Line: 1 Col: 579 Unexpected end tag (noframes). Ignored. -Line: 1 Col: 590 Unexpected end tag (noscript). Ignored. -Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored. -Line: 1 Col: 610 Unexpected end tag (option). Ignored. -Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored. -Line: 1 Col: 633 Unexpected end tag (textarea). Ignored. -#document -| -| -| -|
-| -| -| -|

- -#data - -#errors -Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. -Line: 1 Col: 10 Expected closing tag. Unexpected end of file. -#document -| -| -| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat deleted file mode 100644 index 4f8df86f..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat +++ /dev/null @@ -1,799 +0,0 @@ -#data - -#errors -#document -| -| -| -| -| - -#data -a -#errors -29: Bogus comment -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| - -#data - -#errors -35: Stray “svg†start tag. -42: Stray end tag “svg†-#document -| -| -| -| -| -#errors -43: Stray “svg†start tag. -50: Stray end tag “svg†-#document -| -| -| -| -|

-#errors -34: Start tag “svg” seen in “table”. -41: Stray end tag “svg”. -#document -| -| -| -| -| -| - -#data -
foo
-#errors -34: Start tag “svg” seen in “table”. -46: Stray end tag “g”. -53: Stray end tag “svg”. -#document -| -| -| -| -| -| -| "foo" -| - -#data -
foobar
-#errors -34: Start tag “svg” seen in “table”. -46: Stray end tag “g”. -58: Stray end tag “g”. -65: Stray end tag “svg”. -#document -| -| -| -| -| -| -| "foo" -| -| "bar" -| - -#data -
foobar
-#errors -41: Start tag “svg” seen in “table”. -53: Stray end tag “g”. -65: Stray end tag “g”. -72: Stray end tag “svg”. -#document -| -| -| -| -| -| -| "foo" -| -| "bar" -| -| - -#data -
foobar
-#errors -45: Start tag “svg” seen in “table”. -57: Stray end tag “g”. -69: Stray end tag “g”. -76: Stray end tag “svg”. -#document -| -| -| -| -| -| -| "foo" -| -| "bar" -| -| -| - -#data -
foobar
-#errors -#document -| -| -| -| -| -| -| -|
-| -| -| "foo" -| -| "bar" - -#data -
foobar

baz

-#errors -#document -| -| -| -| -| -| -| -|
-| -| -| "foo" -| -| "bar" -|

-| "baz" - -#data -
foobar

baz

-#errors -#document -| -| -| -| -| -|
-| -| -| "foo" -| -| "bar" -|

-| "baz" - -#data -
foobar

baz

quux -#errors -70: HTML start tag “p” in a foreign namespace context. -81: “table” closed but “caption” was still open. -#document -| -| -| -| -| -|
-| -| -| "foo" -| -| "bar" -|

-| "baz" -|

-| "quux" - -#data -
foobarbaz

quux -#errors -78: “table” closed but “caption” was still open. -78: Unclosed elements on stack. -#document -| -| -| -| -| -|
-| -| -| "foo" -| -| "bar" -| "baz" -|

-| "quux" - -#data -foobar

baz

quux -#errors -44: Start tag “svg” seen in “table”. -56: Stray end tag “g”. -68: Stray end tag “g”. -71: HTML start tag “p” in a foreign namespace context. -71: Start tag “p” seen in “table”. -#document -| -| -| -| -| -| -| "foo" -| -| "bar" -|

-| "baz" -| -| -|

-| "quux" - -#data -

quux -#errors -50: Stray “svg” start tag. -54: Stray “g” start tag. -62: Stray end tag “g” -66: Stray “g” start tag. -74: Stray end tag “g” -77: Stray “p” start tag. -88: “table” end tag with “select” open. -#document -| -| -| -| -| -| -| -|
-|

quux -#errors -36: Start tag “select” seen in “table”. -42: Stray “svg” start tag. -46: Stray “g” start tag. -54: Stray end tag “g” -58: Stray “g” start tag. -66: Stray end tag “g” -69: Stray “p” start tag. -80: “table” end tag with “select” open. -#document -| -| -| -| -| -|

-| "quux" - -#data -foobar

baz -#errors -41: Stray “svg” start tag. -68: HTML start tag “p” in a foreign namespace context. -#document -| -| -| -| -| -| -| "foo" -| -| "bar" -|

-| "baz" - -#data -foobar

baz -#errors -34: Stray “svg” start tag. -61: HTML start tag “p” in a foreign namespace context. -#document -| -| -| -| -| -| -| "foo" -| -| "bar" -|

-| "baz" - -#data -

-#errors -31: Stray “svg” start tag. -35: Stray “g” start tag. -40: Stray end tag “g” -44: Stray “g” start tag. -49: Stray end tag “g” -52: Stray “p” start tag. -58: Stray “span” start tag. -58: End of file seen and there were open elements. -#document -| -| -| -| - -#data -

-#errors -42: Stray “svg†start tag. -46: Stray “g†start tag. -51: Stray end tag “g†-55: Stray “g†start tag. -60: Stray end tag “g†-63: Stray “p†start tag. -69: Stray “span†start tag. -#document -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| xlink:href="foo" -| -| xlink href="foo" - -#data - -#errors -#document -| -| -| -| -| xlink:href="foo" -| xml:lang="en" -| -| -| xlink href="foo" -| xml lang="en" - -#data - -#errors -#document -| -| -| -| -| xlink:href="foo" -| xml:lang="en" -| -| -| xlink href="foo" -| xml lang="en" - -#data -bar -#errors -#document -| -| -| -| -| xlink:href="foo" -| xml:lang="en" -| -| -| xlink href="foo" -| xml lang="en" -| "bar" - -#data - -#errors -#document -| -| -| -| - -#data -

a -#errors -#document -| -| -| -|
-| -| "a" - -#data -
a -#errors -#document -| -| -| -|
-| -| -| "a" - -#data -
-#errors -#document -| -| -| -|
-| -| -| - -#data -
a -#errors -#document -| -| -| -|
-| -| -| -| -| "a" - -#data -

a -#errors -#document -| -| -| -|

-| -| -| -|

-| "a" - -#data -
    a -#errors -40: HTML start tag “ul†in a foreign namespace context. -41: End of file in a foreign namespace context. -#document -| -| -| -| -| -| -|
    -| -|
      -| "a" - -#data -
        a -#errors -35: HTML start tag “ul†in a foreign namespace context. -36: End of file in a foreign namespace context. -#document -| -| -| -| -| -| -| -|
          -| "a" - -#data -

          -#errors -#document -| -| -| -| -|

          -| -| -|

          - -#data -

          -#errors -#document -| -| -| -| -|

          -| -| -|

          - -#data -

          -#errors -#document -| -| -| -|

          -| -| -| -|

          -|

          - -#data -
          -#errors -#document -| -| -| -| -| -|
          -| -|
          -| -| - -#data -
          -#errors -#document -| -| -| -| -| -| -| -|
          -|
          -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data -

-#errors -#document -| -| -| -| -|
-| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| -| - -#data -
-#errors -#document -| -| -| -| -| -| -| -|
-| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| -| -| -| -| -| -| -| -| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat deleted file mode 100644 index 638cde47..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat +++ /dev/null @@ -1,482 +0,0 @@ -#data - -#errors -#document -| -| -| -| -| -| attributeName="" -| attributeType="" -| baseFrequency="" -| baseProfile="" -| calcMode="" -| clipPathUnits="" -| contentScriptType="" -| contentStyleType="" -| diffuseConstant="" -| edgeMode="" -| externalResourcesRequired="" -| filterRes="" -| filterUnits="" -| glyphRef="" -| gradientTransform="" -| gradientUnits="" -| kernelMatrix="" -| kernelUnitLength="" -| keyPoints="" -| keySplines="" -| keyTimes="" -| lengthAdjust="" -| limitingConeAngle="" -| markerHeight="" -| markerUnits="" -| markerWidth="" -| maskContentUnits="" -| maskUnits="" -| numOctaves="" -| pathLength="" -| patternContentUnits="" -| patternTransform="" -| patternUnits="" -| pointsAtX="" -| pointsAtY="" -| pointsAtZ="" -| preserveAlpha="" -| preserveAspectRatio="" -| primitiveUnits="" -| refX="" -| refY="" -| repeatCount="" -| repeatDur="" -| requiredExtensions="" -| requiredFeatures="" -| specularConstant="" -| specularExponent="" -| spreadMethod="" -| startOffset="" -| stdDeviation="" -| stitchTiles="" -| surfaceScale="" -| systemLanguage="" -| tableValues="" -| targetX="" -| targetY="" -| textLength="" -| viewBox="" -| viewTarget="" -| xChannelSelector="" -| yChannelSelector="" -| zoomAndPan="" - -#data - -#errors -#document -| -| -| -| -| -| attributeName="" -| attributeType="" -| baseFrequency="" -| baseProfile="" -| calcMode="" -| clipPathUnits="" -| contentScriptType="" -| contentStyleType="" -| diffuseConstant="" -| edgeMode="" -| externalResourcesRequired="" -| filterRes="" -| filterUnits="" -| glyphRef="" -| gradientTransform="" -| gradientUnits="" -| kernelMatrix="" -| kernelUnitLength="" -| keyPoints="" -| keySplines="" -| keyTimes="" -| lengthAdjust="" -| limitingConeAngle="" -| markerHeight="" -| markerUnits="" -| markerWidth="" -| maskContentUnits="" -| maskUnits="" -| numOctaves="" -| pathLength="" -| patternContentUnits="" -| patternTransform="" -| patternUnits="" -| pointsAtX="" -| pointsAtY="" -| pointsAtZ="" -| preserveAlpha="" -| preserveAspectRatio="" -| primitiveUnits="" -| refX="" -| refY="" -| repeatCount="" -| repeatDur="" -| requiredExtensions="" -| requiredFeatures="" -| specularConstant="" -| specularExponent="" -| spreadMethod="" -| startOffset="" -| stdDeviation="" -| stitchTiles="" -| surfaceScale="" -| systemLanguage="" -| tableValues="" -| targetX="" -| targetY="" -| textLength="" -| viewBox="" -| viewTarget="" -| xChannelSelector="" -| yChannelSelector="" -| zoomAndPan="" - -#data - -#errors -#document -| -| -| -| -| -| attributeName="" -| attributeType="" -| baseFrequency="" -| baseProfile="" -| calcMode="" -| clipPathUnits="" -| contentScriptType="" -| contentStyleType="" -| diffuseConstant="" -| edgeMode="" -| externalResourcesRequired="" -| filterRes="" -| filterUnits="" -| glyphRef="" -| gradientTransform="" -| gradientUnits="" -| kernelMatrix="" -| kernelUnitLength="" -| keyPoints="" -| keySplines="" -| keyTimes="" -| lengthAdjust="" -| limitingConeAngle="" -| markerHeight="" -| markerUnits="" -| markerWidth="" -| maskContentUnits="" -| maskUnits="" -| numOctaves="" -| pathLength="" -| patternContentUnits="" -| patternTransform="" -| patternUnits="" -| pointsAtX="" -| pointsAtY="" 
-| pointsAtZ="" -| preserveAlpha="" -| preserveAspectRatio="" -| primitiveUnits="" -| refX="" -| refY="" -| repeatCount="" -| repeatDur="" -| requiredExtensions="" -| requiredFeatures="" -| specularConstant="" -| specularExponent="" -| spreadMethod="" -| startOffset="" -| stdDeviation="" -| stitchTiles="" -| surfaceScale="" -| systemLanguage="" -| tableValues="" -| targetX="" -| targetY="" -| textLength="" -| viewBox="" -| viewTarget="" -| xChannelSelector="" -| yChannelSelector="" -| zoomAndPan="" - -#data - -#errors -#document -| -| -| -| -| -| attributename="" -| attributetype="" -| basefrequency="" -| baseprofile="" -| calcmode="" -| clippathunits="" -| contentscripttype="" -| contentstyletype="" -| diffuseconstant="" -| edgemode="" -| externalresourcesrequired="" -| filterres="" -| filterunits="" -| glyphref="" -| gradienttransform="" -| gradientunits="" -| kernelmatrix="" -| kernelunitlength="" -| keypoints="" -| keysplines="" -| keytimes="" -| lengthadjust="" -| limitingconeangle="" -| markerheight="" -| markerunits="" -| markerwidth="" -| maskcontentunits="" -| maskunits="" -| numoctaves="" -| pathlength="" -| patterncontentunits="" -| patterntransform="" -| patternunits="" -| pointsatx="" -| pointsaty="" -| pointsatz="" -| preservealpha="" -| preserveaspectratio="" -| primitiveunits="" -| refx="" -| refy="" -| repeatcount="" -| repeatdur="" -| requiredextensions="" -| requiredfeatures="" -| specularconstant="" -| specularexponent="" -| spreadmethod="" -| startoffset="" -| stddeviation="" -| stitchtiles="" -| surfacescale="" -| systemlanguage="" -| tablevalues="" -| targetx="" -| targety="" -| textlength="" -| viewbox="" -| viewtarget="" -| xchannelselector="" -| ychannelselector="" -| zoomandpan="" - -#data - -#errors -#document -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat deleted file mode 100644 index 63107d27..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat +++ /dev/null @@ -1,62 +0,0 @@ -#data -

foobazeggs

spam

quuxbar -#errors -#document -| -| -| -| -|

-| "foo" -| -| -| -| "baz" -| -| -| -| -| "eggs" -| -| -|

-| "spam" -| -| -| -|
-| -| -| "quux" -| "bar" - -#data -foobazeggs

spam
quuxbar -#errors -#document -| -| -| -| -| "foo" -| -| -| -| "baz" -| -| -| -| -| "eggs" -| -| -|

-| "spam" -| -| -| -|
-| -| -| "quux" -| "bar" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat deleted file mode 100644 index b8713f88..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat +++ /dev/null @@ -1,74 +0,0 @@ -#data - -#errors -#document -| -| -| -| -| - -#data - -#errors -#document -| -| -| -| -| -| - -#data - -#errors -15: Unexpected start tag html -#document -| -| -| abc:def="gh" -| -| -| - -#data - -#errors -15: Unexpected start tag html -#document -| -| -| xml:lang="bar" -| -| - -#data - -#errors -#document -| -| -| 123="456" -| -| - -#data - -#errors -#document -| -| -| 123="456" -| 789="012" -| -| - -#data - -#errors -#document -| -| -| -| -| 789="012" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat deleted file mode 100644 index 6ce1c0d1..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat +++ /dev/null @@ -1,208 +0,0 @@ -#data -

X -#errors -Line: 1 Col: 31 Unexpected end tag (p). Ignored. -Line: 1 Col: 36 Expected closing tag. Unexpected end of file. -#document -| -| -| -| -|

-| -| -| -| -| -| -| " " -|

-| "X" - -#data -

-

X -#errors -Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. -Line: 1 Col: 16 Unexpected end tag (p). Ignored. -Line: 2 Col: 4 Expected closing tag. Unexpected end of file. -#document -| -| -| -|

-| -| -| -| -| -| -| " -" -|

-| "X" - -#data - -#errors -Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element. -#document -| -| -| -| -| " " - -#data - -#errors -Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. -#document -| -| -| -| -| - -#data - -#errors -Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE. -Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element. -#document -| -| -| -| - -#data -X -#errors -Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. -#document -| -| -| -| -| -| "X" - -#data -<!doctype html><table> X<meta></table> -#errors -Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. -Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| " X" -| <meta> -| <table> - -#data -<!doctype html><table> x</table> -#errors -Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| " x" -| <table> - -#data -<!doctype html><table> x </table> -#errors -Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| " x " -| <table> - -#data -<!doctype html><table><tr> x</table> -#errors -Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| " x" -| <table> -| <tbody> -| <tr> - -#data -<!doctype html><table>X<style> <tr>x </style> </table> -#errors -Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "X" -| <table> -| <style> -| " <tr>x " -| " " - -#data -<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div> -#errors -Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode. -Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <div> -| <a> -| "foo" -| <table> -| " " -| <tbody> -| <tr> -| <td> -| "bar" -| " " - -#data -<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes> -#errors -6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>â€. -13: Stray start tag “frameâ€. -21: Stray end tag “frameâ€. -29: Stray end tag “frameâ€. -39: “frameset†start tag after “body†already open. -105: End of file seen inside an [R]CDATA element. -105: End of file seen and there were open elements. -XXX: These errors are wrong, please fix me! -#document -| <html> -| <head> -| <frameset> -| <frame> -| <frameset> -| <frame> -| <noframes> -| "</frameset><noframes>" - -#data -<!DOCTYPE html><object></html> -#errors -1: Expected closing tag. Unexpected end of file -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <object> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat deleted file mode 100644 index c8ef66f0..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat +++ /dev/null @@ -1,2299 +0,0 @@ -#data -<!doctype html><script> -#errors -Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| <body> - -#data -<!doctype html><script>a -#errors -Line: 1 Col: 24 Unexpected end of file. 
Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "a" -| <body> - -#data -<!doctype html><script>< -#errors -Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<" -| <body> - -#data -<!doctype html><script></ -#errors -Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</" -| <body> - -#data -<!doctype html><script></S -#errors -Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</S" -| <body> - -#data -<!doctype html><script></SC -#errors -Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</SC" -| <body> - -#data -<!doctype html><script></SCR -#errors -Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</SCR" -| <body> - -#data -<!doctype html><script></SCRI -#errors -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</SCRI" -| <body> - -#data -<!doctype html><script></SCRIP -#errors -Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</SCRIP" -| <body> - -#data -<!doctype html><script></SCRIPT -#errors -Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</SCRIPT" -| <body> - -#data -<!doctype html><script></SCRIPT -#errors -Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| <body> - -#data -<!doctype html><script></s -#errors -Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</s" -| <body> - -#data -<!doctype html><script></sc -#errors -Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</sc" -| <body> - -#data -<!doctype html><script></scr -#errors -Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</scr" -| <body> - -#data -<!doctype html><script></scri -#errors -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</scri" -| <body> - -#data -<!doctype html><script></scrip -#errors -Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</scrip" -| <body> - -#data -<!doctype html><script></script -#errors -Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "</script" -| <body> - -#data -<!doctype html><script></script -#errors -Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| <body> - -#data -<!doctype html><script><! -#errors -Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!" -| <body> - -#data -<!doctype html><script><!a -#errors -Line: 1 Col: 26 Unexpected end of file. 
Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!a" -| <body> - -#data -<!doctype html><script><!- -#errors -Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!-" -| <body> - -#data -<!doctype html><script><!-a -#errors -Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!-a" -| <body> - -#data -<!doctype html><script><!-- -#errors -Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--" -| <body> - -#data -<!doctype html><script><!--a -#errors -Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--a" -| <body> - -#data -<!doctype html><script><!--< -#errors -Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<" -| <body> - -#data -<!doctype html><script><!--<a -#errors -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<a" -| <body> - -#data -<!doctype html><script><!--</ -#errors -Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--</" -| <body> - -#data -<!doctype html><script><!--</script -#errors -Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--</script" -| <body> - -#data -<!doctype html><script><!--</script -#errors -Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--" -| <body> - -#data -<!doctype html><script><!--<s -#errors -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<s" -| <body> - -#data -<!doctype html><script><!--<script -#errors -Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script" -| <body> - -#data -<!doctype html><script><!--<script -#errors -Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script " -| <body> - -#data -<!doctype html><script><!--<script < -#errors -Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script <" -| <body> - -#data -<!doctype html><script><!--<script <a -#errors -Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script <a" -| <body> - -#data -<!doctype html><script><!--<script </ -#errors -Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </" -| <body> - -#data -<!doctype html><script><!--<script </s -#errors -Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </s" -| <body> - -#data -<!doctype html><script><!--<script </script -#errors -Line: 1 Col: 43 Unexpected end of file. Expected end tag (script). 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script" -| <body> - -#data -<!doctype html><script><!--<script </scripta -#errors -Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </scripta" -| <body> - -#data -<!doctype html><script><!--<script </script -#errors -Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<!doctype html><script><!--<script </script> -#errors -Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script>" -| <body> - -#data -<!doctype html><script><!--<script </script/ -#errors -Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script/" -| <body> - -#data -<!doctype html><script><!--<script </script < -#errors -Line: 1 Col: 45 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script <" -| <body> - -#data -<!doctype html><script><!--<script </script <a -#errors -Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script <a" -| <body> - -#data -<!doctype html><script><!--<script </script </ -#errors -Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script </" -| <body> - -#data -<!doctype html><script><!--<script </script </script -#errors -Line: 1 Col: 52 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script </script" -| <body> - -#data -<!doctype html><script><!--<script </script </script -#errors -Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<!doctype html><script><!--<script </script </script/ -#errors -Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<!doctype html><script><!--<script </script </script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<!doctype html><script><!--<script - -#errors -Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script -" -| <body> - -#data -<!doctype html><script><!--<script -a -#errors -Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script -a" -| <body> - -#data -<!doctype html><script><!--<script -< -#errors -Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script -<" -| <body> - -#data -<!doctype html><script><!--<script -- -#errors -Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script --" -| <body> - -#data -<!doctype html><script><!--<script --a -#errors -Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script --a" -| <body> - -#data -<!doctype html><script><!--<script --< -#errors -Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script --<" -| <body> - -#data -<!doctype html><script><!--<script --> -#errors -Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<!doctype html><script><!--<script -->< -#errors -Line: 1 Col: 39 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script --><" -| <body> - -#data -<!doctype html><script><!--<script --></ -#errors -Line: 1 Col: 40 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script --></" -| <body> - -#data -<!doctype html><script><!--<script --></script -#errors -Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script --></script" -| <body> - -#data -<!doctype html><script><!--<script --></script -#errors -Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<!doctype html><script><!--<script --></script/ -#errors -Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<!doctype html><script><!--<script --></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<!doctype html><script><!--<script><\/script>--></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script><\/script>-->" -| <body> - -#data -<!doctype html><script><!--<script></scr'+'ipt>--></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></scr'+'ipt>-->" -| <body> - -#data -<!doctype html><script><!--<script></script><script></script></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>" -| <body> - -#data -<!doctype html><script><!--<script></script><script></script>--><!--</script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>--><!--" -| <body> - -#data -<!doctype html><script><!--<script></script><script></script>-- ></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>-- >" -| <body> - -#data -<!doctype html><script><!--<script></script><script></script>- -></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>- ->" -| <body> - -#data -<!doctype html><script><!--<script></script><script></script>- - ></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>- - >" -| <body> - -#data -<!doctype html><script><!--<script></script><script></script>-></script> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>->" -| <body> - -#data -<!doctype html><script><!--<script>--!></script>X -#errors -Line: 1 Col: 49 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script>--!></script>X" -| <body> - -#data -<!doctype html><script><!--<scr'+'ipt></script>--></script> -#errors -Line: 1 Col: 59 Unexpected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<scr'+'ipt>" -| <body> -| "-->" - -#data -<!doctype html><script><!--<script></scr'+'ipt></script>X -#errors -Line: 1 Col: 57 Unexpected end of file. Expected end tag (script). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| "<!--<script></scr'+'ipt></script>X" -| <body> - -#data -<!doctype html><style><!--<style></style>--></style> -#errors -Line: 1 Col: 52 Unexpected end tag (style). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "<!--<style>" -| <body> -| "-->" - -#data -<!doctype html><style><!--</style>X -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "<!--" -| <body> -| "X" - -#data -<!doctype html><style><!--...</style>...--></style> -#errors -Line: 1 Col: 51 Unexpected end tag (style). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "<!--..." 
-| <body> -| "...-->" - -#data -<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" -| <body> -| "X" - -#data -<!doctype html><style><!--...<style><!--...--!></style>--></style> -#errors -Line: 1 Col: 66 Unexpected end tag (style). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "<!--...<style><!--...--!>" -| <body> -| "-->" - -#data -<!doctype html><style><!--...</style><!-- --><style>@import ...</style> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "<!--..." -| <!-- --> -| <style> -| "@import ..." -| <body> - -#data -<!doctype html><style>...<style><!--...</style><!-- --></style> -#errors -Line: 1 Col: 63 Unexpected end tag (style). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "...<style><!--..." -| <!-- --> -| <body> - -#data -<!doctype html><style>...<!--[if IE]><style>...</style>X -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <style> -| "...<!--[if IE]><style>..." -| <body> -| "X" - -#data -<!doctype html><title><!--<title>--> -#errors -Line: 1 Col: 52 Unexpected end tag (title). -#document -| -| -| -| -| "<!--<title>" -| <body> -| "-->" - -#data -<!doctype html><title></title> -#errors -#document -| -| -| -| -| "" -| - -#data -foo/title><link></head><body>X -#errors -Line: 1 Col: 52 Unexpected end of file. Expected end tag (title). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <title> -| "foo/title><link></head><body>X" -| <body> - -#data -<!doctype html><noscript><!--<noscript></noscript>--></noscript> -#errors -Line: 1 Col: 64 Unexpected end tag (noscript). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <noscript> -| "<!--<noscript>" -| <body> -| "-->" - -#data -<!doctype html><noscript><!--</noscript>X<noscript>--></noscript> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <noscript> -| "<!--" -| <body> -| "X" -| <noscript> -| "-->" - -#data -<!doctype html><noscript><iframe></noscript>X -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <noscript> -| "<iframe>" -| <body> -| "X" - -#data -<!doctype html><noframes><!--<noframes></noframes>--></noframes> -#errors -Line: 1 Col: 64 Unexpected end tag (noframes). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <noframes> -| "<!--<noframes>" -| <body> -| "-->" - -#data -<!doctype html><noframes><body><script><!--...</script></body></noframes></html> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <noframes> -| "<body><script><!--...</script></body>" -| <body> - -#data -<!doctype html><textarea><!--<textarea></textarea>--></textarea> -#errors -Line: 1 Col: 64 Unexpected end tag (textarea). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <textarea> -| "<!--<textarea>" -| "-->" - -#data -<!doctype html><textarea></textarea></textarea> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <textarea> -| "</textarea>" - -#data -<!doctype html><textarea><</textarea> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <textarea> -| "<" - -#data -<!doctype html><textarea>a<b</textarea> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <textarea> -| "a<b" - -#data -<!doctype html><iframe><!--<iframe></iframe>--></iframe> -#errors -Line: 1 Col: 56 Unexpected end tag (iframe). 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <iframe> -| "<!--<iframe>" -| "-->" - -#data -<!doctype html><iframe>...<!--X->...<!--/X->...</iframe> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <iframe> -| "...<!--X->...<!--/X->..." - -#data -<!doctype html><xmp><!--<xmp></xmp>--></xmp> -#errors -Line: 1 Col: 44 Unexpected end tag (xmp). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <xmp> -| "<!--<xmp>" -| "-->" - -#data -<!doctype html><noembed><!--<noembed></noembed>--></noembed> -#errors -Line: 1 Col: 60 Unexpected end tag (noembed). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <noembed> -| "<!--<noembed>" -| "-->" - -#data -<script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 8 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| <body> - -#data -<script>a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "a" -| <body> - -#data -<script>< -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<" -| <body> - -#data -<script></ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</" -| <body> - -#data -<script></S -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</S" -| <body> - -#data -<script></SC -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</SC" -| <body> - -#data -<script></SCR -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</SCR" -| <body> - -#data -<script></SCRI -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</SCRI" -| <body> - -#data -<script></SCRIP -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</SCRIP" -| <body> - -#data -<script></SCRIPT -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</SCRIPT" -| <body> - -#data -<script></SCRIPT -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| <body> - -#data -<script></s -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</s" -| <body> - -#data -<script></sc -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. 
-Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</sc" -| <body> - -#data -<script></scr -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</scr" -| <body> - -#data -<script></scri -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</scri" -| <body> - -#data -<script></scrip -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</scrip" -| <body> - -#data -<script></script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</script" -| <body> - -#data -<script></script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| <body> - -#data -<script><! -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!" -| <body> - -#data -<script><!a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!a" -| <body> - -#data -<script><!- -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!-" -| <body> - -#data -<script><!-a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!-a" -| <body> - -#data -<script><!-- -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--" -| <body> - -#data -<script><!--a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--a" -| <body> - -#data -<script><!--< -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<" -| <body> - -#data -<script><!--<a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<a" -| <body> - -#data -<script><!--</ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--</" -| <body> - -#data -<script><!--</script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). 
-#document -| <html> -| <head> -| <script> -| "<!--</script" -| <body> - -#data -<script><!--</script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--" -| <body> - -#data -<script><!--<s -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<s" -| <body> - -#data -<script><!--<script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 19 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script" -| <body> - -#data -<script><!--<script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script " -| <body> - -#data -<script><!--<script < -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script <" -| <body> - -#data -<script><!--<script <a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script <a" -| <body> - -#data -<script><!--<script </ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </" -| <body> - -#data -<script><!--<script </s -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </s" -| <body> - -#data -<script><!--<script </script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script" -| <body> - -#data -<script><!--<script </scripta -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </scripta" -| <body> - -#data -<script><!--<script </script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<script><!--<script </script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script>" -| <body> - -#data -<script><!--<script </script/ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script/" -| <body> - -#data -<script><!--<script </script < -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). 
-#document -| <html> -| <head> -| <script> -| "<!--<script </script <" -| <body> - -#data -<script><!--<script </script <a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script <a" -| <body> - -#data -<script><!--<script </script </ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script </" -| <body> - -#data -<script><!--<script </script </script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script </script" -| <body> - -#data -<script><!--<script </script </script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<script><!--<script </script </script/ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<script><!--<script </script </script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script </script " -| <body> - -#data -<script><!--<script - -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script -" -| <body> - -#data -<script><!--<script -a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script -a" -| <body> - -#data -<script><!--<script -- -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script --" -| <body> - -#data -<script><!--<script --a -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script --a" -| <body> - -#data -<script><!--<script --> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<script><!--<script -->< -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script --><" -| <body> - -#data -<script><!--<script --></ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script --></" -| <body> - -#data -<script><!--<script --></script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 31 Unexpected end of file. 
Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script --></script" -| <body> - -#data -<script><!--<script --></script -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<script><!--<script --></script/ -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<script><!--<script --></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script -->" -| <body> - -#data -<script><!--<script><\/script>--></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script><\/script>-->" -| <body> - -#data -<script><!--<script></scr'+'ipt>--></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script></scr'+'ipt>-->" -| <body> - -#data -<script><!--<script></script><script></script></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>" -| <body> - -#data -<script><!--<script></script><script></script>--><!--</script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>--><!--" -| <body> - -#data -<script><!--<script></script><script></script>-- ></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>-- >" -| <body> - -#data -<script><!--<script></script><script></script>- -></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>- ->" -| <body> - -#data -<script><!--<script></script><script></script>- - ></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>- - >" -| <body> - -#data -<script><!--<script></script><script></script>-></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -#document -| <html> -| <head> -| <script> -| "<!--<script></script><script></script>->" -| <body> - -#data -<script><!--<script>--!></script>X -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<script>--!></script>X" -| <body> - -#data -<script><!--<scr'+'ipt></script>--></script> -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 44 Unexpected end tag (script). -#document -| <html> -| <head> -| <script> -| "<!--<scr'+'ipt>" -| <body> -| "-->" - -#data -<script><!--<script></scr'+'ipt></script>X -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 42 Unexpected end of file. Expected end tag (script). 
-#document -| <html> -| <head> -| <script> -| "<!--<script></scr'+'ipt></script>X" -| <body> - -#data -<style><!--<style></style>--></style> -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -Line: 1 Col: 37 Unexpected end tag (style). -#document -| <html> -| <head> -| <style> -| "<!--<style>" -| <body> -| "-->" - -#data -<style><!--</style>X -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| <html> -| <head> -| <style> -| "<!--" -| <body> -| "X" - -#data -<style><!--...</style>...--></style> -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -Line: 1 Col: 36 Unexpected end tag (style). -#document -| <html> -| <head> -| <style> -| "<!--..." -| <body> -| "...-->" - -#data -<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| <html> -| <head> -| <style> -| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" -| <body> -| "X" - -#data -<style><!--...<style><!--...--!></style>--></style> -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -Line: 1 Col: 51 Unexpected end tag (style). -#document -| <html> -| <head> -| <style> -| "<!--...<style><!--...--!>" -| <body> -| "-->" - -#data -<style><!--...</style><!-- --><style>@import ...</style> -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| <html> -| <head> -| <style> -| "<!--..." -| <!-- --> -| <style> -| "@import ..." -| <body> - -#data -<style>...<style><!--...</style><!-- --></style> -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -Line: 1 Col: 48 Unexpected end tag (style). -#document -| <html> -| <head> -| <style> -| "...<style><!--..." -| <!-- --> -| <body> - -#data -<style>...<!--[if IE]><style>...</style>X -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| <html> -| <head> -| <style> -| "...<!--[if IE]><style>..." -| <body> -| "X" - -#data -<title><!--<title>--> -#errors -Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. -Line: 1 Col: 37 Unexpected end tag (title). -#document -| -| -| -| "<!--<title>" -| <body> -| "-->" - -#data -<title></title> -#errors -Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. -#document -| -| -| -| "" -| - -#data -foo/title><link></head><body>X -#errors -Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. -Line: 1 Col: 37 Unexpected end of file. Expected end tag (title). -#document -| <html> -| <head> -| <title> -| "foo/title><link></head><body>X" -| <body> - -#data -<noscript><!--<noscript></noscript>--></noscript> -#errors -Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. -Line: 1 Col: 49 Unexpected end tag (noscript). -#document -| <html> -| <head> -| <noscript> -| "<!--<noscript>" -| <body> -| "-->" - -#data -<noscript><!--</noscript>X<noscript>--></noscript> -#errors -Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. -#document -| <html> -| <head> -| <noscript> -| "<!--" -| <body> -| "X" -| <noscript> -| "-->" - -#data -<noscript><iframe></noscript>X -#errors -Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. -#document -| <html> -| <head> -| <noscript> -| "<iframe>" -| <body> -| "X" - -#data -<noframes><!--<noframes></noframes>--></noframes> -#errors -Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. -Line: 1 Col: 49 Unexpected end tag (noframes). 
-#document -| <html> -| <head> -| <noframes> -| "<!--<noframes>" -| <body> -| "-->" - -#data -<noframes><body><script><!--...</script></body></noframes></html> -#errors -Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. -#document -| <html> -| <head> -| <noframes> -| "<body><script><!--...</script></body>" -| <body> - -#data -<textarea><!--<textarea></textarea>--></textarea> -#errors -Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. -Line: 1 Col: 49 Unexpected end tag (textarea). -#document -| <html> -| <head> -| <body> -| <textarea> -| "<!--<textarea>" -| "-->" - -#data -<textarea></textarea></textarea> -#errors -Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| <textarea> -| "</textarea>" - -#data -<iframe><!--<iframe></iframe>--></iframe> -#errors -Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. -Line: 1 Col: 41 Unexpected end tag (iframe). -#document -| <html> -| <head> -| <body> -| <iframe> -| "<!--<iframe>" -| "-->" - -#data -<iframe>...<!--X->...<!--/X->...</iframe> -#errors -Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| <iframe> -| "...<!--X->...<!--/X->..." - -#data -<xmp><!--<xmp></xmp>--></xmp> -#errors -Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE. -Line: 1 Col: 29 Unexpected end tag (xmp). -#document -| <html> -| <head> -| <body> -| <xmp> -| "<!--<xmp>" -| "-->" - -#data -<noembed><!--<noembed></noembed>--></noembed> -#errors -Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE. -Line: 1 Col: 45 Unexpected end tag (noembed). -#document -| <html> -| <head> -| <body> -| <noembed> -| "<!--<noembed>" -| "-->" - -#data -<!doctype html><table> - -#errors -Line 2 Col 0 Unexpected end of file. Expected table content. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| " -" - -#data -<!doctype html><table><td><span><font></span><span> -#errors -Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase. -Line 1 Col 45 Unexpected end tag (span). -Line 1 Col 51 Expected closing tag. Unexpected end of file. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <td> -| <span> -| <font> -| <font> -| <span> - -#data -<!doctype html><form><table></form><form></table></form> -#errors -35: Stray end tag “form”. -41: Start tag “form” seen in “table”. 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <form> -| <table> -| <form> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat deleted file mode 100644 index 7b555f88..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat +++ /dev/null @@ -1,153 +0,0 @@ -#data -<!doctype html><table><tbody><select><tr> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <table> -| <tbody> -| <tr> - -#data -<!doctype html><table><tr><select><td> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <table> -| <tbody> -| <tr> -| <td> - -#data -<!doctype html><table><tr><td><select><td> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <td> -| <select> -| <td> - -#data -<!doctype html><table><tr><th><select><td> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <th> -| <select> -| <td> - -#data -<!doctype html><table><caption><select><tr> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <caption> -| <select> -| <tbody> -| <tr> - -#data -<!doctype html><select><tr> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><select><td> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><select><th> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><select><tbody> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><select><thead> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><select><tfoot> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><select><caption> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><table><tr></table>a -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| "a" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat deleted file mode 100644 index 680e1f06..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat +++ /dev/null @@ -1,269 +0,0 @@ -#data -<!doctype html><plaintext></plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <plaintext> -| "</plaintext>" - -#data -<!doctype html><table><plaintext></plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <plaintext> -| "</plaintext>" -| <table> - -#data -<!doctype html><table><tbody><plaintext></plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <plaintext> -| "</plaintext>" -| <table> -| <tbody> - -#data -<!doctype html><table><tbody><tr><plaintext></plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <plaintext> -| "</plaintext>" -| <table> -| <tbody> -| <tr> - -#data -<!doctype html><table><tbody><tr><plaintext></plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <plaintext> -| "</plaintext>" -| <table> -| <tbody> -| <tr> - -#data -<!doctype html><table><td><plaintext></plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| 
<tbody> -| <tr> -| <td> -| <plaintext> -| "</plaintext>" - -#data -<!doctype html><table><caption><plaintext></plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <caption> -| <plaintext> -| "</plaintext>" - -#data -<!doctype html><table><tr><style></script></style>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "abc" -| <table> -| <tbody> -| <tr> -| <style> -| "</script>" - -#data -<!doctype html><table><tr><script></style></script>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "abc" -| <table> -| <tbody> -| <tr> -| <script> -| "</style>" - -#data -<!doctype html><table><caption><style></script></style>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <caption> -| <style> -| "</script>" -| "abc" - -#data -<!doctype html><table><td><style></script></style>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <td> -| <style> -| "</script>" -| "abc" - -#data -<!doctype html><select><script></style></script>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <script> -| "</style>" -| "abc" - -#data -<!doctype html><table><select><script></style></script>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <script> -| "</style>" -| "abc" -| <table> - -#data -<!doctype html><table><tr><select><script></style></script>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <script> -| "</style>" -| "abc" -| <table> -| <tbody> -| <tr> - -#data -<!doctype html><frameset></frameset><noframes>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <noframes> -| "abc" - -#data -<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <noframes> -| "abc" -| <!-- abc --> - -#data -<!doctype html><frameset></frameset></html><noframes>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <noframes> -| "abc" - -#data -<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <noframes> -| "abc" -| <!-- abc --> - -#data -<!doctype html><table><tr></tbody><tfoot> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <tfoot> - -#data -<!doctype html><table><td><svg></svg>abc<td> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <td> -| <svg svg> -| "abc" -| <td> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat deleted file mode 100644 index 0d62f5a5..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat +++ /dev/null @@ -1,1237 +0,0 @@ -#data -<!doctype html><math><mn DefinitionUrl="foo"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <math math> -| <math mn> -| definitionURL="foo" - -#data -<!doctype html><html></p><!--foo--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <!-- foo --> -| <head> -| <body> - -#data -<!doctype html><head></head></p><!--foo--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <!-- foo --> -| <body> - -#data -<!doctype html><body><p><pre> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <pre> - 
-#data -<!doctype html><body><p><listing> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <listing> - -#data -<!doctype html><p><plaintext> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <plaintext> - -#data -<!doctype html><p><h1> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <h1> - -#data -<!doctype html><form><isindex> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <form> - -#data -<!doctype html><isindex action="POST"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <form> -| action="POST" -| <hr> -| <label> -| "This is a searchable index. Enter search keywords: " -| <input> -| name="isindex" -| <hr> - -#data -<!doctype html><isindex prompt="this is isindex"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <form> -| <hr> -| <label> -| "this is isindex" -| <input> -| name="isindex" -| <hr> - -#data -<!doctype html><isindex type="hidden"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <form> -| <hr> -| <label> -| "This is a searchable index. Enter search keywords: " -| <input> -| name="isindex" -| type="hidden" -| <hr> - -#data -<!doctype html><isindex name="foo"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <form> -| <hr> -| <label> -| "This is a searchable index. Enter search keywords: " -| <input> -| name="isindex" -| <hr> - -#data -<!doctype html><ruby><p><rp> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <ruby> -| <p> -| <rp> - -#data -<!doctype html><ruby><div><span><rp> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <ruby> -| <div> -| <span> -| <rp> - -#data -<!doctype html><ruby><div><p><rp> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <ruby> -| <div> -| <p> -| <rp> - -#data -<!doctype html><ruby><p><rt> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <ruby> -| <p> -| <rt> - -#data -<!doctype html><ruby><div><span><rt> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <ruby> -| <div> -| <span> -| <rt> - -#data -<!doctype html><ruby><div><p><rt> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <ruby> -| <div> -| <p> -| <rt> - -#data -<!doctype html><math/><foo> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <math math> -| <foo> - -#data -<!doctype html><svg/><foo> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <svg svg> -| <foo> - -#data -<!doctype html><div></body><!--foo--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <div> -| <!-- foo --> - -#data -<!doctype html><h1><div><h3><span></h1>foo -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <h1> -| <div> -| <h3> -| <span> -| "foo" - -#data -<!doctype html><p></h3>foo -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| "foo" - -#data -<!doctype html><h3><li>abc</h2>foo -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <h3> -| <li> -| "abc" -| "foo" - -#data -<!doctype html><table>abc<!--foo--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "abc" -| <table> -| <!-- foo --> - -#data -<!doctype html><table> <!--foo--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| " " -| <!-- foo --> - -#data -<!doctype html><table> b <!--foo--> -#errors -#document -| <!DOCTYPE html> 
-| <html> -| <head> -| <body> -| " b " -| <table> -| <!-- foo --> - -#data -<!doctype html><select><option><option> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <option> -| <option> - -#data -<!doctype html><select><option></optgroup> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <option> - -#data -<!doctype html><select><option></optgroup> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <option> - -#data -<!doctype html><p><math><mi><p><h1> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <math math> -| <math mi> -| <p> -| <h1> - -#data -<!doctype html><p><math><mo><p><h1> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <math math> -| <math mo> -| <p> -| <h1> - -#data -<!doctype html><p><math><mn><p><h1> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <math math> -| <math mn> -| <p> -| <h1> - -#data -<!doctype html><p><math><ms><p><h1> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <math math> -| <math ms> -| <p> -| <h1> - -#data -<!doctype html><p><math><mtext><p><h1> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <math math> -| <math mtext> -| <p> -| <h1> - -#data -<!doctype html><frameset></noframes> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> - -#data -<!doctype html><html c=d><body></html><html a=b> -#errors -#document -| <!DOCTYPE html> -| <html> -| a="b" -| c="d" -| <head> -| <body> - -#data -<!doctype html><html c=d><frameset></frameset></html><html a=b> -#errors -#document -| <!DOCTYPE html> -| <html> -| a="b" -| c="d" -| <head> -| <frameset> - -#data -<!doctype html><html><frameset></frameset></html><!--foo--> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <!-- foo --> - -#data -<!doctype html><html><frameset></frameset></html> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| " " - -#data -<!doctype html><html><frameset></frameset></html>abc -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> - -#data -<!doctype html><html><frameset></frameset></html><p> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> - -#data -<!doctype html><html><frameset></frameset></html></p> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> - -#data -<html><frameset></frameset></html><!doctype html> -#errors -#document -| <html> -| <head> -| <frameset> - -#data -<!doctype html><body><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> - -#data -<!doctype html><p><frameset><frame> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <frame> - -#data -<!doctype html><p>a<frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| "a" - -#data -<!doctype html><p> <frameset><frame> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <frame> - -#data -<!doctype html><pre><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <pre> - -#data -<!doctype html><listing><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <listing> - -#data -<!doctype html><li><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <li> - -#data -<!doctype html><dd><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> 
-| <body> -| <dd> - -#data -<!doctype html><dt><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <dt> - -#data -<!doctype html><button><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <button> - -#data -<!doctype html><applet><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <applet> - -#data -<!doctype html><marquee><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <marquee> - -#data -<!doctype html><object><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <object> - -#data -<!doctype html><table><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> - -#data -<!doctype html><area><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <area> - -#data -<!doctype html><basefont><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <basefont> -| <frameset> - -#data -<!doctype html><bgsound><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <bgsound> -| <frameset> - -#data -<!doctype html><br><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <br> - -#data -<!doctype html><embed><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <embed> - -#data -<!doctype html><img><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <img> - -#data -<!doctype html><input><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <input> - -#data -<!doctype html><keygen><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <keygen> - -#data -<!doctype html><wbr><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <wbr> - -#data -<!doctype html><hr><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <hr> - -#data -<!doctype html><textarea></textarea><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <textarea> - -#data -<!doctype html><xmp></xmp><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <xmp> - -#data -<!doctype html><iframe></iframe><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <iframe> - -#data -<!doctype html><select></select><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> - -#data -<!doctype html><svg></svg><frameset><frame> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <frame> - -#data -<!doctype html><math></math><frameset><frame> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <frame> - -#data -<!doctype html><svg><foreignObject><div> <frameset><frame> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <frame> - -#data -<!doctype html><svg>a</svg><frameset><frame> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <svg svg> -| "a" - -#data -<!doctype html><svg> </svg><frameset><frame> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> -| <frame> - -#data -<html>aaa<frameset></frameset> -#errors -#document -| <html> -| <head> -| <body> -| "aaa" - -#data -<html> a <frameset></frameset> -#errors -#document -| <html> -| <head> -| <body> -| "a " - -#data -<!doctype html><div><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> - 
-#data -<!doctype html><div><body><frameset> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <div> - -#data -<!doctype html><p><math></p>a -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <math math> -| "a" - -#data -<!doctype html><p><math><mn><span></p>a -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <math math> -| <math mn> -| <span> -| <p> -| "a" - -#data -<!doctype html><math></html> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <math math> - -#data -<!doctype html><meta charset="ascii"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <meta> -| charset="ascii" -| <body> - -#data -<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <meta> -| content="text/html;charset=ascii" -| http-equiv="content-type" -| <body> - -#data -<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8"> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa --> -| <meta> -| charset="utf8" -| <body> - -#data -<!doctype html><html a=b><head></head><html c=d> -#errors -#document -| <!DOCTYPE html> -| <html> -| a="b" -| c="d" -| <head> -| <body> - -#data -<!doctype html><image/> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <img> - -#data -<!doctype html>a<i>b<table>c<b>d</i>e</b>f -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "a" -| <i> -| "bc" -| <b> -| "de" -| "f" -| <table> - -#data -<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <i> -| "a" -| <b> -| "b" -| <b> -| <div> -| <b> -| <i> -| "c" -| <a> -| "d" -| <a> -| "e" -| <a> -| "f" -| <table> - -#data -<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <i> -| "a" -| <b> -| "b" -| <b> -| <div> -| <b> -| <i> -| "c" -| <a> -| "d" -| <a> -| "e" -| <a> -| "f" - -#data -<!doctype html><table><i>a<b>b<div>c</i> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <i> -| "a" -| <b> -| "b" -| <b> -| <div> -| <i> -| "c" -| <table> - -#data -<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <i> -| "a" -| <b> -| "b" -| <b> -| <div> -| <b> -| <i> -| "c" -| <a> -| "d" -| <a> -| "e" -| <a> -| "f" -| <table> - -#data -<!doctype html><table><i>a<div>b<tr>c<b>d</i>e -#errors 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <i> -| "a" -| <div> -| "b" -| <i> -| "c" -| <b> -| "d" -| <b> -| "e" -| <table> -| <tbody> -| <tr> - -#data -<!doctype html><table><td><table><i>a<div>b<b>c</i>d -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <td> -| <i> -| "a" -| <div> -| <i> -| "b" -| <b> -| "c" -| <b> -| "d" -| <table> - -#data -<!doctype html><body><bgsound> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <bgsound> - -#data -<!doctype html><body><basefont> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <basefont> - -#data -<!doctype html><a><b></a><basefont> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <a> -| <b> -| <basefont> - -#data -<!doctype html><a><b></a><bgsound> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <a> -| <b> -| <bgsound> - -#data -<!doctype html><figcaption><article></figcaption>a -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <figcaption> -| <article> -| "a" - -#data -<!doctype html><summary><article></summary>a -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <summary> -| <article> -| "a" - -#data -<!doctype html><p><a><plaintext>b -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <a> -| <plaintext> -| <a> -| "b" - -#data -<!DOCTYPE html><div>a<a></div>b<p>c</p>d -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <div> -| "a" -| <a> -| <a> -| "b" -| <p> -| "c" -| "d" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat deleted file mode 100644 index 60d85922..00000000 --- a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat +++ /dev/null @@ -1,763 +0,0 @@ -#data -<!DOCTYPE html>Test -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "Test" - -#data -<textarea>test</div>test -#errors -Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. -Line: 1 Col: 24 Expected closing tag. Unexpected end of file. -#document -| <html> -| <head> -| <body> -| <textarea> -| "test</div>test" - -#data -<table><td> -#errors -Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. -Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. -Line: 1 Col: 11 Expected closing tag. Unexpected end of file. -#document -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <td> - -#data -<table><td>test</tbody></table> -#errors -Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. -Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. -#document -| <html> -| <head> -| <body> -| <table> -| <tbody> -| <tr> -| <td> -| "test" - -#data -<frame>test -#errors -Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE. -Line: 1 Col: 7 Unexpected start tag frame. Ignored. -#document -| <html> -| <head> -| <body> -| "test" - -#data -<!DOCTYPE html><frameset>test -#errors -Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored. -Line: 1 Col: 29 Expected closing tag. Unexpected end of file. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> - -#data -<!DOCTYPE html><frameset><!DOCTYPE html> -#errors -Line: 1 Col: 40 Unexpected DOCTYPE. Ignored. -Line: 1 Col: 40 Expected closing tag. Unexpected end of file. 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <frameset> - -#data -<!DOCTYPE html><font><p><b>test</font> -#errors -Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. -Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <font> -| <p> -| <font> -| <b> -| "test" - -#data -<!DOCTYPE html><dt><div><dd> -#errors -Line: 1 Col: 28 Missing end tag (div, dt). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <dt> -| <div> -| <dd> - -#data -<script></x -#errors -Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. -Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). -#document -| <html> -| <head> -| <script> -| "</x" -| <body> - -#data -<table><plaintext><td> -#errors -Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. -Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode. -Line: 1 Col: 22 Unexpected end of file. Expected table content. -#document -| <html> -| <head> -| <body> -| <plaintext> -| "<td>" -| <table> - -#data -<plaintext></plaintext> -#errors -Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE. -Line: 1 Col: 23 Expected closing tag. Unexpected end of file. -#document -| <html> -| <head> -| <body> -| <plaintext> -| "</plaintext>" - -#data -<!DOCTYPE html><table><tr>TEST -#errors -Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode. -Line: 1 Col: 30 Unexpected end of file. Expected table content. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "TEST" -| <table> -| <tbody> -| <tr> - -#data -<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4> -#errors -Line: 1 Col: 37 Unexpected start tag (body). -Line: 1 Col: 53 Unexpected start tag (body). -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| t1="1" -| t2="2" -| t3="3" -| t4="4" - -#data -</b test -#errors -Line: 1 Col: 8 Unexpected end of file in attribute name. -Line: 1 Col: 8 End tag contains unexpected attributes. -Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE. -Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element. -#document -| <html> -| <head> -| <body> - -#data -<!DOCTYPE html></b test<b &=&amp>X -#errors -Line: 1 Col: 32 Named entity didn't end with ';'. -Line: 1 Col: 33 End tag contains unexpected attributes. -Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "X" - -#data -<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt -#errors -Line: 1 Col: 9 No space after literal string 'DOCTYPE'. -Line: 1 Col: 54 Unexpected end of file in the tag name. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <script> -| type="text/x-foobar;baz" -| "X</SCRipt" -| <body> - -#data -& -#errors -Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "&" - -#data -&# -#errors -Line: 1 Col: 1 Numeric entity expected. Got end of file instead. -Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "&#" - -#data -&#X -#errors -Line: 1 Col: 3 Numeric entity expected but none found. -Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "&#X" - -#data -&#x -#errors -Line: 1 Col: 3 Numeric entity expected but none found. -Line: 1 Col: 3 Unexpected non-space characters. 
Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "&#x" - -#data -&#45 -#errors -Line: 1 Col: 4 Numeric entity didn't end with ';'. -Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "-" - -#data -&x-test -#errors -Line: 1 Col: 1 Named entity expected. Got none. -Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "&x-test" - -#data -<!doctypehtml><p><li> -#errors -Line: 1 Col: 9 No space after literal string 'DOCTYPE'. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <li> - -#data -<!doctypehtml><p><dt> -#errors -Line: 1 Col: 9 No space after literal string 'DOCTYPE'. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <dt> - -#data -<!doctypehtml><p><dd> -#errors -Line: 1 Col: 9 No space after literal string 'DOCTYPE'. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <dd> - -#data -<!doctypehtml><p><form> -#errors -Line: 1 Col: 9 No space after literal string 'DOCTYPE'. -Line: 1 Col: 23 Expected closing tag. Unexpected end of file. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| <form> - -#data -<!DOCTYPE html><p></P>X -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <p> -| "X" - -#data -&amp -#errors -Line: 1 Col: 4 Named entity didn't end with ';'. -Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "&" - -#data -&AMp; -#errors -Line: 1 Col: 1 Named entity expected. Got none. -Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "&AMp;" - -#data -<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY> -#errors -Line: 1 Col: 110 Expected closing tag. Unexpected end of file. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly> - -#data -<!DOCTYPE html>X</body>X -#errors -Line: 1 Col: 24 Unexpected non-space characters in the after body phase. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| "XX" - -#data -<!DOCTYPE html><!-- X -#errors -Line: 1 Col: 21 Unexpected end of file in comment. -#document -| <!DOCTYPE html> -| <!-- X --> -| <html> -| <head> -| <body> - -#data -<!DOCTYPE html><table><caption>test TEST</caption><td>test -#errors -Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase. -Line: 1 Col: 58 Expected closing tag. Unexpected end of file. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <table> -| <caption> -| "test TEST" -| <tbody> -| <tr> -| <td> -| "test" - -#data -<!DOCTYPE html><select><option><optgroup> -#errors -Line: 1 Col: 41 Expected closing tag. Unexpected end of file. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <option> -| <optgroup> - -#data -<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option> -#errors -Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag. -Line: 1 Col: 76 Expected closing tag. Unexpected end of file. -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <optgroup> -| <option> -| <option> -| <option> - -#data -<!DOCTYPE html><select><optgroup><option><optgroup> -#errors -Line: 1 Col: 51 Expected closing tag. Unexpected end of file. 
-#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <optgroup> -| <option> -| <optgroup> - -#data -<!DOCTYPE html><datalist><option>foo</datalist>bar -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <datalist> -| <option> -| "foo" -| "bar" - -#data -<!DOCTYPE html><font><input><input></font> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <font> -| <input> -| <input> - -#data -<!DOCTYPE html><!-- XXX - XXX --> -#errors -#document -| <!DOCTYPE html> -| <!-- XXX - XXX --> -| <html> -| <head> -| <body> - -#data -<!DOCTYPE html><!-- XXX - XXX -#errors -Line: 1 Col: 29 Unexpected end of file in comment (-) -#document -| <!DOCTYPE html> -| <!-- XXX - XXX --> -| <html> -| <head> -| <body> - -#data -<!DOCTYPE html><!-- XXX - XXX - XXX --> -#errors -#document -| <!DOCTYPE html> -| <!-- XXX - XXX - XXX --> -| <html> -| <head> -| <body> - -#data -<isindex test=x name=x> -#errors -Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE. -Line: 1 Col: 23 Unexpected start tag isindex. Don't use it! -#document -| <html> -| <head> -| <body> -| <form> -| <hr> -| <label> -| "This is a searchable index. Enter search keywords: " -| <input> -| name="isindex" -| test="x" -| <hr> - -#data -test -test -#errors -Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> -| "test -test" - -#data -<!DOCTYPE html><body><title>test</body> -#errors -#document -| -| -| -| -| -| "test</body>" - -#data -<!DOCTYPE html><body><title>X -#errors -#document -| -| -| -| -| -| "X" -| <meta> -| name="z" -| <link> -| rel="foo" -| <style> -| " -x { content:"</style" } " - -#data -<!DOCTYPE html><select><optgroup></optgroup></select> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> -| <select> -| <optgroup> - -#data - - -#errors -Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE. -#document -| <html> -| <head> -| <body> - -#data -<!DOCTYPE html> <html> -#errors -#document -| <!DOCTYPE html> -| <html> -| <head> -| <body> - -#data -<!DOCTYPE html><script> -</script> <title>x -#errors -#document -| -| -| -| -#errors -Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. -Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved. -#document -| -| -| -#errors -Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. -Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved. -#document -| -| -| -#errors -Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. -#document -| -| -| -| -| "x" -| x -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -Line: 1 Col: 22 Unexpected end of file. Expected end tag (style). -#document -| -| -| --> x -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| -| -| x -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| -| -| x -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| -| -| x -#errors -Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. -#document -| -| -|

-#errors -#document -| -| -| -| -| -| ddd -#errors -#document -| -| -| -#errors -#document -| -| -| -| -|
  • -| -| ", - " -
    << Back to Go HTTP/2 demo server`) - }) -} - -func httpsHost() string { - if *hostHTTPS != "" { - return *hostHTTPS - } - if v := *httpsAddr; strings.HasPrefix(v, ":") { - return "localhost" + v - } else { - return v - } -} - -func httpHost() string { - if *hostHTTP != "" { - return *hostHTTP - } - if v := *httpAddr; strings.HasPrefix(v, ":") { - return "localhost" + v - } else { - return v - } -} - -func serveProdTLS() error { - const cacheDir = "/var/cache/autocert" - if err := os.MkdirAll(cacheDir, 0700); err != nil { - return err - } - m := autocert.Manager{ - Cache: autocert.DirCache(cacheDir), - Prompt: autocert.AcceptTOS, - HostPolicy: autocert.HostWhitelist("http2.golang.org"), - } - srv := &http.Server{ - TLSConfig: &tls.Config{ - GetCertificate: m.GetCertificate, - }, - } - http2.ConfigureServer(srv, &http2.Server{ - NewWriteScheduler: func() http2.WriteScheduler { - return http2.NewPriorityWriteScheduler(nil) - }, - }) - ln, err := net.Listen("tcp", ":443") - if err != nil { - return err - } - return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)) -} - -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -func serveProd() error { - errc := make(chan error, 2) - go func() { errc <- http.ListenAndServe(":80", nil) }() - go func() { errc <- serveProdTLS() }() - return <-errc -} - -const idleTimeout = 5 * time.Minute -const activeTimeout = 10 * time.Minute - -// TODO: put this into the standard library and actually send -// PING frames and GOAWAY, etc: golang.org/issue/14204 -func idleTimeoutHook() func(net.Conn, http.ConnState) { - var mu sync.Mutex - m := map[net.Conn]*time.Timer{} - return func(c net.Conn, cs http.ConnState) { - mu.Lock() - defer mu.Unlock() - if t, ok := m[c]; ok { - delete(m, c) - t.Stop() - } - var d time.Duration - switch cs { - case http.StateNew, http.StateIdle: - d = idleTimeout - case http.StateActive: - d = activeTimeout - default: - return - } - m[c] = time.AfterFunc(d, func() { - log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) - go c.Close() - }) - } -} - -func main() { - var srv http.Server - flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") - flag.Parse() - srv.Addr = *httpsAddr - srv.ConnState = idleTimeoutHook() - - registerHandlers() - - if *prod { - *hostHTTP = "http2.golang.org" - *hostHTTPS = "http2.golang.org" - log.Fatal(serveProd()) - } - - url := "https://" + httpsHost() + "/" - log.Printf("Listening on " + url) - http2.ConfigureServer(&srv, &http2.Server{}) - - if *httpAddr != "" { - go func() { - log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)") - log.Fatal(http.ListenAndServe(*httpAddr, nil)) - }() - } - - go func() { - log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) - }() - select {} -} diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go deleted file mode 100644 index df0866a3..00000000 --- a/vendor/golang.org/x/net/http2/h2demo/launch.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - compute "google.golang.org/api/compute/v1" -) - -var ( - proj = flag.String("project", "symbolic-datum-552", "name of Project") - zone = flag.String("zone", "us-central1-a", "GCE zone") - mach = flag.String("machinetype", "n1-standard-1", "Machine type") - instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") - sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") - staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") - - writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") - publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") -) - -func readFile(v string) string { - slurp, err := ioutil.ReadFile(v) - if err != nil { - log.Fatalf("Error reading %s: %v", v, err) - } - return strings.TrimSpace(string(slurp)) -} - -var config = &oauth2.Config{ - // The client-id and secret should be for an "Installed Application" when using - // the CLI. Later we'll use a web application with a callback. - ClientID: readFile("client-id.dat"), - ClientSecret: readFile("client-secret.dat"), - Endpoint: google.Endpoint, - Scopes: []string{ - compute.DevstorageFullControlScope, - compute.ComputeScope, - "https://www.googleapis.com/auth/sqlservice", - "https://www.googleapis.com/auth/sqlservice.admin", - }, - RedirectURL: "urn:ietf:wg:oauth:2.0:oob", -} - -const baseConfig = `#cloud-config -coreos: - units: - - name: h2demo.service - command: start - content: | - [Unit] - Description=HTTP2 Demo - - [Service] - ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' - ExecStart=/opt/bin/h2demo --prod - RestartSec=5s - Restart=always - Type=simple - - [Install] - WantedBy=multi-user.target -` - -func main() { - flag.Parse() - if *proj == "" { - log.Fatalf("Missing --project flag") - } - prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj - machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach - - const tokenFileName = "token.dat" - tokenFile := tokenCacheFile(tokenFileName) - tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) - token, err := tokenSource.Token() - if err != nil { - if *writeObject != "" { - log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") - } - log.Printf("Error getting token from %s: %v", tokenFileName, err) - log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) - fmt.Print("\nEnter auth code: ") - sc := bufio.NewScanner(os.Stdin) - sc.Scan() - authCode := strings.TrimSpace(sc.Text()) - token, err = config.Exchange(oauth2.NoContext, authCode) - if err != nil { - log.Fatalf("Error exchanging auth code for a token: %v", err) - } - if err := tokenFile.WriteToken(token); err != nil { - log.Fatalf("Error writing to %s: %v", tokenFileName, err) - } - tokenSource = oauth2.ReuseTokenSource(token, nil) - } - - oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) - - if *writeObject != "" { - writeCloudStorageObject(oauthClient) - return - } - - computeService, 
_ := compute.New(oauthClient) - - natIP := *staticIP - if natIP == "" { - // Try to find it by name. - aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() - if err != nil { - log.Fatal(err) - } - // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList - IPLoop: - for _, asl := range aggAddrList.Items { - for _, addr := range asl.Addresses { - if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { - natIP = addr.Address - break IPLoop - } - } - } - } - - cloudConfig := baseConfig - if *sshPub != "" { - key := strings.TrimSpace(readFile(*sshPub)) - cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) - } - if os.Getenv("USER") == "bradfitz" { - cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") - } - const maxCloudConfig = 32 << 10 // per compute API docs - if len(cloudConfig) > maxCloudConfig { - log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) - } - - instance := &compute.Instance{ - Name: *instName, - Description: "Go Builder", - MachineType: machType, - Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, - Tags: &compute.Tags{ - Items: []string{"http-server", "https-server"}, - }, - Metadata: &compute.Metadata{ - Items: []*compute.MetadataItems{ - { - Key: "user-data", - Value: &cloudConfig, - }, - }, - }, - NetworkInterfaces: []*compute.NetworkInterface{ - { - AccessConfigs: []*compute.AccessConfig{ - { - Type: "ONE_TO_ONE_NAT", - Name: "External NAT", - NatIP: natIP, - }, - }, - Network: prefix + "/global/networks/default", - }, - }, - ServiceAccounts: []*compute.ServiceAccount{ - { - Email: "default", - Scopes: []string{ - compute.DevstorageFullControlScope, - compute.ComputeScope, - }, - }, - }, - } - - log.Printf("Creating instance...") - op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() - if err != nil { - log.Fatalf("Failed to create instance: %v", err) - } - opName := op.Name - log.Printf("Created. Waiting on operation %v", opName) -OpLoop: - for { - time.Sleep(2 * time.Second) - op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() - if err != nil { - log.Fatalf("Failed to get op %s: %v", opName, err) - } - switch op.Status { - case "PENDING", "RUNNING": - log.Printf("Waiting on operation %v", opName) - continue - case "DONE": - if op.Error != nil { - for _, operr := range op.Error.Errors { - log.Printf("Error: %+v", operr) - } - log.Fatalf("Failed to start.") - } - log.Printf("Success. 
%+v", op) - break OpLoop - default: - log.Fatalf("Unknown status %q: %+v", op.Status, op) - } - } - - inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() - if err != nil { - log.Fatalf("Error getting instance after creation: %v", err) - } - ij, _ := json.MarshalIndent(inst, "", " ") - log.Printf("Instance: %s", ij) -} - -func instanceDisk(svc *compute.Service) *compute.AttachedDisk { - const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" - diskName := *instName + "-disk" - - return &compute.AttachedDisk{ - AutoDelete: true, - Boot: true, - Type: "PERSISTENT", - InitializeParams: &compute.AttachedDiskInitializeParams{ - DiskName: diskName, - SourceImage: imageURL, - DiskSizeGb: 50, - }, - } -} - -func writeCloudStorageObject(httpClient *http.Client) { - content := os.Stdin - const maxSlurp = 1 << 20 - var buf bytes.Buffer - n, err := io.CopyN(&buf, content, maxSlurp) - if err != nil && err != io.EOF { - log.Fatalf("Error reading from stdin: %v, %v", n, err) - } - contentType := http.DetectContentType(buf.Bytes()) - - req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) - if err != nil { - log.Fatal(err) - } - req.Header.Set("x-goog-api-version", "2") - if *publicObject { - req.Header.Set("x-goog-acl", "public-read") - } - req.Header.Set("Content-Type", contentType) - res, err := httpClient.Do(req) - if err != nil { - log.Fatal(err) - } - if res.StatusCode != 200 { - res.Write(os.Stderr) - log.Fatalf("Failed.") - } - log.Printf("Success.") - os.Exit(0) -} - -type tokenCacheFile string - -func (f tokenCacheFile) Token() (*oauth2.Token, error) { - slurp, err := ioutil.ReadFile(string(f)) - if err != nil { - return nil, err - } - t := new(oauth2.Token) - if err := json.Unmarshal(slurp, t); err != nil { - return nil, err - } - return t, nil -} - -func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { - jt, err := json.Marshal(t) - if err != nil { - return err - } - return ioutil.WriteFile(string(f), jt, 0600) -} diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key deleted file mode 100644 index a15a6aba..00000000 --- a/vendor/golang.org/x/net/http2/h2demo/rootCA.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q -62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby -XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV -mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ -JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ -SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA -nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e -/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx -qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser -hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j -NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E -LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 -8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c -0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws -K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd -bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo -QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt 
-Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 -nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy -b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 -gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev -WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr -C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj -x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA -hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y ------END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem deleted file mode 100644 index 3a323e77..00000000 --- a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG -A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 -DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 -NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv -c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS -R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT -ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk -JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 -mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW -caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G -A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt -hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB -MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES -MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv -bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h -U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao -eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 -UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD -58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n -sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF -kPe6XoSbiLm/kxk32T0= ------END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl deleted file mode 100644 index 6db38918..00000000 --- a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -E2CE26BF3285059C diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt deleted file mode 100644 index c59059bd..00000000 --- a/vendor/golang.org/x/net/http2/h2demo/server.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV -UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT -C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW -DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow -RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE -ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l -gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 -dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL 
-A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws -/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 -F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB -AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R -rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD -EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 -KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI -dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU -90p6/CbU71bGbfpM2PHot2fm ------END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key deleted file mode 100644 index f329c142..00000000 --- a/vendor/golang.org/x/net/http2/h2demo/server.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi -fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm -J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef -b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 -mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ -fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p -3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 -qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 -NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 -LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN -a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ -Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL -W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO -gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm -S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS -Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp -V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 -KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 -yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 -drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e -ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R -48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 -c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY -nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl -IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd ------END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go deleted file mode 100644 index 504d6a78..00000000 --- a/vendor/golang.org/x/net/http2/h2demo/tmpl.go +++ /dev/null @@ -1,1991 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build h2demo - -package main - -import "html/template" - -var pushTmpl = template.Must(template.New("serverpush").Parse(` - - - - - - - - - HTTP/2 Server Push Demo - - - - - - - - - -
    -Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go. -
    - - - -
    -... -
    - - - - -
    -
    -
    -
    - Run - Format - - - -
    -
    - - -
    -
    - - -

    Command go

    - - - - - - - - - - - - - - -

    -Go is a tool for managing Go source code. -

    -

    -Usage: -

    -
    go command [arguments]
    -
    -

    -The commands are: -

    -
    build       compile packages and dependencies
    -clean       remove object files
    -doc         show documentation for package or symbol
    -env         print Go environment information
    -bug         start a bug report
    -fix         run go tool fix on packages
    -fmt         run gofmt on package sources
    -generate    generate Go files by processing source
    -get         download and install packages and dependencies
    -install     compile and install packages and dependencies
    -list        list packages
    -run         compile and run Go program
    -test        test packages
    -tool        run specified go tool
    -version     print Go version
    -vet         run go tool vet on packages
    -
    -

    -Use "go help [command]" for more information about a command. -

    -

    -Additional help topics: -

    -
    c           calling between Go and C
    -buildmode   description of build modes
    -filetype    file types
    -gopath      GOPATH environment variable
    -environment environment variables
    -importpath  import path syntax
    -packages    description of package lists
    -testflag    description of testing flags
    -testfunc    description of testing functions
    -
    -

    -Use "go help [topic]" for more information about that topic. -

    -

    Compile packages and dependencies

    -

    -Usage: -

    -
    go build [-o output] [-i] [build flags] [packages]
    -
    -

    -Build compiles the packages named by the import paths, -along with their dependencies, but it does not install the results. -

    -

    -If the arguments to build are a list of .go files, build treats -them as a list of source files specifying a single package. -

    -

    -When compiling a single main package, build writes -the resulting executable to an output file named after -the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') -or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). -The '.exe' suffix is added when writing a Windows executable. -

    -

    -When compiling multiple packages or a single non-main package, -build compiles the packages but discards the resulting object, -serving only as a check that the packages can be built. -

    -

    -When compiling packages, build ignores files that end in '_test.go'. -

    -

    -The -o flag, only allowed when compiling a single package, -forces build to write the resulting executable or object -to the named output file, instead of the default behavior described -in the last two paragraphs. -

    -

    -The -i flag installs the packages that are dependencies of the target. -

    -

    -The build flags are shared by the build, clean, get, install, list, run, -and test commands: -

    -
    -a
    -	force rebuilding of packages that are already up-to-date.
    --n
    -	print the commands but do not run them.
    --p n
    -	the number of programs, such as build commands or
    -	test binaries, that can be run in parallel.
    -	The default is the number of CPUs available.
    --race
    -	enable data race detection.
    -	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
    --msan
    -	enable interoperation with memory sanitizer.
    -	Supported only on linux/amd64,
    -	and only with Clang/LLVM as the host C compiler.
    --v
    -	print the names of packages as they are compiled.
    --work
    -	print the name of the temporary work directory and
    -	do not delete it when exiting.
    --x
    -	print the commands.
    -
    --asmflags 'flag list'
    -	arguments to pass on each go tool asm invocation.
    --buildmode mode
    -	build mode to use. See 'go help buildmode' for more.
    --compiler name
    -	name of compiler to use, as in runtime.Compiler (gccgo or gc).
    --gccgoflags 'arg list'
    -	arguments to pass on each gccgo compiler/linker invocation.
    --gcflags 'arg list'
    -	arguments to pass on each go tool compile invocation.
    --installsuffix suffix
    -	a suffix to use in the name of the package installation directory,
    -	in order to keep output separate from default builds.
    -	If using the -race flag, the install suffix is automatically set to race
    -	or, if set explicitly, has _race appended to it.  Likewise for the -msan
    -	flag.  Using a -buildmode option that requires non-default compile flags
    -	has a similar effect.
    --ldflags 'flag list'
    -	arguments to pass on each go tool link invocation.
    --linkshared
    -	link against shared libraries previously created with
    -	-buildmode=shared.
    --pkgdir dir
    -	install and load all packages from dir instead of the usual locations.
    -	For example, when building with a non-standard configuration,
    -	use -pkgdir to keep generated packages in a separate location.
    --tags 'tag list'
    -	a list of build tags to consider satisfied during the build.
    -	For more information about build tags, see the description of
    -	build constraints in the documentation for the go/build package.
    --toolexec 'cmd args'
    -	a program to use to invoke toolchain programs like vet and asm.
    -	For example, instead of running asm, the go command will run
    -	'cmd args /path/to/asm <arguments for asm>'.
    -
    -

    -The list flags accept a space-separated list of strings. To embed spaces -in an element in the list, surround it with either single or double quotes. -

    -

    -For more about specifying packages, see 'go help packages'. -For more about where packages and binaries are installed, -run 'go help gopath'. -For more about calling between Go and C/C++, run 'go help c'. -

    -

    -Note: Build adheres to certain conventions such as those described -by 'go help gopath'. Not all projects can follow these conventions, -however. Installations that have their own conventions or that use -a separate software build system may choose to use lower-level -invocations such as 'go tool compile' and 'go tool link' to avoid -some of the overheads and design decisions of the build tool. -

    -

    -See also: go install, go get, go clean. -

    -

    Remove object files

    -

    -Usage: -

    -
    go clean [-i] [-r] [-n] [-x] [build flags] [packages]
    -
    -

    -Clean removes object files from package source directories. -The go command builds most objects in a temporary directory, -so go clean is mainly concerned with object files left by other -tools or by manual invocations of go build. -

    -

    -Specifically, clean removes the following files from each of the -source directories corresponding to the import paths: -

    -
    _obj/            old object directory, left from Makefiles
    -_test/           old test directory, left from Makefiles
    -_testmain.go     old gotest file, left from Makefiles
    -test.out         old test log, left from Makefiles
    -build.out        old test log, left from Makefiles
    -*.[568ao]        object files, left from Makefiles
    -
    -DIR(.exe)        from go build
    -DIR.test(.exe)   from go test -c
    -MAINFILE(.exe)   from go build MAINFILE.go
    -*.so             from SWIG
    -
    -

    -In the list, DIR represents the final path element of the -directory, and MAINFILE is the base name of any Go source -file in the directory that is not included when building -the package. -

    -

    -The -i flag causes clean to remove the corresponding installed -archive or binary (what 'go install' would create). -

    -

    -The -n flag causes clean to print the remove commands it would execute, -but not run them. -

    -

    -The -r flag causes clean to be applied recursively to all the -dependencies of the packages named by the import paths. -

    -

    -The -x flag causes clean to print remove commands as it executes them. -

    -

    -For more about build flags, see 'go help build'. -

    -

    -For more about specifying packages, see 'go help packages'. -

    -

    Show documentation for package or symbol

    -

    -Usage: -

    -
    go doc [-u] [-c] [package|[package.]symbol[.method]]
    -
    -

    -Doc prints the documentation comments associated with the item identified by its -arguments (a package, const, func, type, var, or method) followed by a one-line -summary of each of the first-level items "under" that item (package-level -declarations for a package, methods for a type, etc.). -

    -

    -Doc accepts zero, one, or two arguments. -

    -

    -Given no arguments, that is, when run as -

    -
    go doc
    -
    -

    -it prints the package documentation for the package in the current directory. -If the package is a command (package main), the exported symbols of the package -are elided from the presentation unless the -cmd flag is provided. -

    -

    -When run with one argument, the argument is treated as a Go-syntax-like -representation of the item to be documented. What the argument selects depends -on what is installed in GOROOT and GOPATH, as well as the form of the argument, -which is schematically one of these: -

    -
    go doc <pkg>
    -go doc <sym>[.<method>]
    -go doc [<pkg>.]<sym>[.<method>]
    -go doc [<pkg>.][<sym>.]<method>
    -
    -

    -The first item in this list matched by the argument is the one whose documentation -is printed. (See the examples below.) However, if the argument starts with a capital -letter it is assumed to identify a symbol or method in the current directory. -

    -

    -For packages, the order of scanning is determined lexically in breadth-first order. -That is, the package presented is the one that matches the search and is nearest -the root and lexically first at its level of the hierarchy. The GOROOT tree is -always scanned in its entirety before GOPATH. -

    -

    -If there is no package specified or matched, the package in the current -directory is selected, so "go doc Foo" shows the documentation for symbol Foo in -the current package. -

    -

    -The package path must be either a qualified path or a proper suffix of a -path. The go tool's usual package mechanism does not apply: package path -elements like . and ... are not implemented by go doc. -

    -

    -When run with two arguments, the first must be a full package path (not just a -suffix), and the second is a symbol or symbol and method; this is similar to the -syntax accepted by godoc: -

    -
    go doc <pkg> <sym>[.<method>]
    -
    -

    -In all forms, when matching symbols, lower-case letters in the argument match -either case but upper-case letters match exactly. This means that there may be -multiple matches of a lower-case argument in a package if different symbols have -different cases. If this occurs, documentation for all matches is printed. -

    -

    -Examples: -

    -
    go doc
    -	Show documentation for current package.
    -go doc Foo
    -	Show documentation for Foo in the current package.
    -	(Foo starts with a capital letter so it cannot match
    -	a package path.)
    -go doc encoding/json
    -	Show documentation for the encoding/json package.
    -go doc json
    -	Shorthand for encoding/json.
    -go doc json.Number (or go doc json.number)
    -	Show documentation and method summary for json.Number.
    -go doc json.Number.Int64 (or go doc json.number.int64)
    -	Show documentation for json.Number's Int64 method.
    -go doc cmd/doc
    -	Show package docs for the doc command.
    -go doc -cmd cmd/doc
    -	Show package docs and exported symbols within the doc command.
    -go doc template.new
    -	Show documentation for html/template's New function.
    -	(html/template is lexically before text/template)
    -go doc text/template.new # One argument
    -	Show documentation for text/template's New function.
    -go doc text/template new # Two arguments
    -	Show documentation for text/template's New function.
    -
    -At least in the current tree, these invocations all print the
    -documentation for json.Decoder's Decode method:
    -
    -go doc json.Decoder.Decode
    -go doc json.decoder.decode
    -go doc json.decode
    -cd go/src/encoding/json; go doc decode
    -
    -

    -Flags: -

    -
    -c
    -	Respect case when matching symbols.
    --cmd
    -	Treat a command (package main) like a regular package.
    -	Otherwise package main's exported symbols are hidden
    -	when showing the package's top-level documentation.
    --u
    -	Show documentation for unexported as well as exported
    -	symbols and methods.
    -
    -

    Print Go environment information

    -

    -Usage: -

    -
    go env [var ...]
    -
    -

    -Env prints Go environment information. -

    -

    -By default env prints information as a shell script -(on Windows, a batch file). If one or more variable -names is given as arguments, env prints the value of -each named variable on its own line. -

    -

    Start a bug report

    -

    -Usage: -

    -
    go bug
    -
    -

    -Bug opens the default browser and starts a new bug report. -The report includes useful system information. -

    -

    Run go tool fix on packages

    -

    -Usage: -

    -
    go fix [packages]
    -
    -

    -Fix runs the Go fix command on the packages named by the import paths. -

    -

    -For more about fix, see 'go doc cmd/fix'. -For more about specifying packages, see 'go help packages'. -

    -

    -To run fix with specific options, run 'go tool fix'. -

    -

    -See also: go fmt, go vet. -

    -

    Run gofmt on package sources

    -

    -Usage: -

    -
    go fmt [-n] [-x] [packages]
    -
    -

    -Fmt runs the command 'gofmt -l -w' on the packages named -by the import paths. It prints the names of the files that are modified. -

    -

    -For more about gofmt, see 'go doc cmd/gofmt'. -For more about specifying packages, see 'go help packages'. -

    -

    -The -n flag prints commands that would be executed. -The -x flag prints commands as they are executed. -

    -

    -To run gofmt with specific options, run gofmt itself. -

    -

    -See also: go fix, go vet. -

    -

    Generate Go files by processing source

    -

    -Usage: -

    -
    go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
    -
    -

    -Generate runs commands described by directives within existing -files. Those commands can run any process but the intent is to -create or update Go source files. -

    -

    -Go generate is never run automatically by go build, go get, go test, -and so on. It must be run explicitly. -

    -

    -Go generate scans the file for directives, which are lines of -the form, -

    -
    //go:generate command argument...
    -
    -

    -(note: no leading spaces and no space in "//go") where command -is the generator to be run, corresponding to an executable file -that can be run locally. It must either be in the shell path -(gofmt), a fully qualified path (/usr/you/bin/mytool), or a -command alias, described below. -

    -

    -Note that go generate does not parse the file, so lines that look -like directives in comments or multiline strings will be treated -as directives. -

    -

    -The arguments to the directive are space-separated tokens or -double-quoted strings passed to the generator as individual -arguments when it is run. -

    -

    -Quoted strings use Go syntax and are evaluated before execution; a -quoted string appears as a single argument to the generator. -

    -

    -Go generate sets several variables when it runs the generator: -

    -
    $GOARCH
    -	The execution architecture (arm, amd64, etc.)
    -$GOOS
    -	The execution operating system (linux, windows, etc.)
    -$GOFILE
    -	The base name of the file.
    -$GOLINE
    -	The line number of the directive in the source file.
    -$GOPACKAGE
    -	The name of the package of the file containing the directive.
    -$DOLLAR
    -	A dollar sign.
    -
    -

    -Other than variable substitution and quoted-string evaluation, no -special processing such as "globbing" is performed on the command -line. -

    -

    -As a last step before running the command, any invocations of any -environment variables with alphanumeric names, such as $GOFILE or -$HOME, are expanded throughout the command line. The syntax for -variable expansion is $NAME on all operating systems. Due to the -order of evaluation, variables are expanded even inside quoted -strings. If the variable NAME is not set, $NAME expands to the -empty string. -

    -

    -A directive of the form, -

    -
    //go:generate -command xxx args...
    -
    -

    -specifies, for the remainder of this source file only, that the -string xxx represents the command identified by the arguments. This -can be used to create aliases or to handle multiword generators. -For example, -

    -
    //go:generate -command foo go tool foo
    -
    -

    -specifies that the command "foo" represents the generator -"go tool foo". -

    -

    -Generate processes packages in the order given on the command line, -one at a time. If the command line lists .go files, they are treated -as a single package. Within a package, generate processes the -source files in a package in file name order, one at a time. Within -a source file, generate runs generators in the order they appear -in the file, one at a time. -

    -

    -If any generator returns an error exit status, "go generate" skips -all further processing for that package. -

    -

    -The generator is run in the package's source directory. -

    -

    -Go generate accepts one specific flag: -

    -
    -run=""
    -	if non-empty, specifies a regular expression to select
    -	directives whose full original source text (excluding
    -	any trailing spaces and final newline) matches the
    -	expression.
    -
    -

    -It also accepts the standard build flags including -v, -n, and -x. -The -v flag prints the names of packages and files as they are -processed. -The -n flag prints commands that would be executed. -The -x flag prints commands as they are executed. -

    -

    -For more about build flags, see 'go help build'. -

    -

    -For more about specifying packages, see 'go help packages'. -

    -

    Download and install packages and dependencies

    -

    -Usage: -

    -
    go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
    -
    -

    -Get downloads the packages named by the import paths, along with their -dependencies. It then installs the named packages, like 'go install'. -

    -

    -The -d flag instructs get to stop after downloading the packages; that is, -it instructs get not to install the packages. -

    -

    -The -f flag, valid only when -u is set, forces get -u not to verify that -each package has been checked out from the source control repository -implied by its import path. This can be useful if the source is a local fork -of the original. -

    -

    -The -fix flag instructs get to run the fix tool on the downloaded packages -before resolving dependencies or building the code. -

    -

    -The -insecure flag permits fetching from repositories and resolving -custom domains using insecure schemes such as HTTP. Use with caution. -

    -

    -The -t flag instructs get to also download the packages required to build -the tests for the specified packages. -

    -

    -The -u flag instructs get to use the network to update the named packages -and their dependencies. By default, get uses the network to check out -missing packages but does not use it to look for updates to existing packages. -

    -

    -The -v flag enables verbose progress and debug output. -

    -

    -Get also accepts build flags to control the installation. See 'go help build'. -

    -

    -When checking out a new package, get creates the target directory -GOPATH/src/<import-path>. If the GOPATH contains multiple entries, -get uses the first one. For more details see: 'go help gopath'. -

    -

    -When checking out or updating a package, get looks for a branch or tag -that matches the locally installed version of Go. The most important -rule is that if the local installation is running version "go1", get -searches for a branch or tag named "go1". If no such version exists it -retrieves the most recent version of the package. -

    -

    -When go get checks out or updates a Git repository, -it also updates any git submodules referenced by the repository. -

    -

    -Get never checks out or updates code stored in vendor directories. -

    -

    -For more about specifying packages, see 'go help packages'. -

    -

    -For more about how 'go get' finds source code to -download, see 'go help importpath'. -

    -

    -See also: go build, go install, go clean. -

    -

    Compile and install packages and dependencies

    -

    -Usage: -

    -
    go install [build flags] [packages]
    -
    -

    -Install compiles and installs the packages named by the import paths, -along with their dependencies. -

    -

    -For more about the build flags, see 'go help build'. -For more about specifying packages, see 'go help packages'. -

    -

    -See also: go build, go get, go clean. -

    -

    List packages

    -

    -Usage: -

    -
    go list [-e] [-f format] [-json] [build flags] [packages]
    -
    -

    -List lists the packages named by the import paths, one per line. -

    -

    -The default output shows the package import path: -

    -
    bytes
    -encoding/json
    -github.com/gorilla/mux
    -golang.org/x/net/html
    -
    -

    -The -f flag specifies an alternate format for the list, using the -syntax of package template. The default output is equivalent to -f -''. The struct being passed to the template is: -

    -
    type Package struct {
    -    Dir           string // directory containing package sources
    -    ImportPath    string // import path of package in dir
    -    ImportComment string // path in import comment on package statement
    -    Name          string // package name
    -    Doc           string // package documentation string
    -    Target        string // install path
    -    Shlib         string // the shared library that contains this package (only set when -linkshared)
    -    Goroot        bool   // is this package in the Go root?
    -    Standard      bool   // is this package part of the standard Go library?
    -    Stale         bool   // would 'go install' do anything for this package?
    -    StaleReason   string // explanation for Stale==true
    -    Root          string // Go root or Go path dir containing this package
    -    ConflictDir   string // this directory shadows Dir in $GOPATH
    -    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
    -
    -    // Source files
    -    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
    -    CgoFiles       []string // .go sources files that import "C"
    -    IgnoredGoFiles []string // .go sources ignored due to build constraints
    -    CFiles         []string // .c source files
    -    CXXFiles       []string // .cc, .cxx and .cpp source files
    -    MFiles         []string // .m source files
    -    HFiles         []string // .h, .hh, .hpp and .hxx source files
    -    FFiles         []string // .f, .F, .for and .f90 Fortran source files
    -    SFiles         []string // .s source files
    -    SwigFiles      []string // .swig files
    -    SwigCXXFiles   []string // .swigcxx files
    -    SysoFiles      []string // .syso object files to add to archive
    -    TestGoFiles    []string // _test.go files in package
    -    XTestGoFiles   []string // _test.go files outside package
    -
    -    // Cgo directives
    -    CgoCFLAGS    []string // cgo: flags for C compiler
    -    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
    -    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
    -    CgoFFLAGS    []string // cgo: flags for Fortran compiler
    -    CgoLDFLAGS   []string // cgo: flags for linker
    -    CgoPkgConfig []string // cgo: pkg-config names
    -
    -    // Dependency information
    -    Imports      []string // import paths used by this package
    -    Deps         []string // all (recursively) imported dependencies
    -    TestImports  []string // imports from TestGoFiles
    -    XTestImports []string // imports from XTestGoFiles
    -
    -    // Error information
    -    Incomplete bool            // this package or a dependency has an error
    -    Error      *PackageError   // error loading package
    -    DepsErrors []*PackageError // errors loading dependencies
    -}
    -
    -

    -Packages stored in vendor directories report an ImportPath that includes the -path to the vendor directory (for example, "d/vendor/p" instead of "p"), -so that the ImportPath uniquely identifies a given copy of a package. -The Imports, Deps, TestImports, and XTestImports lists also contain these -expanded imports paths. See golang.org/s/go15vendor for more about vendoring. -

    -

    -The error information, if any, is -

    -
    type PackageError struct {
    -    ImportStack   []string // shortest path from package named on command line to this one
    -    Pos           string   // position of error (if present, file:line:col)
    -    Err           string   // the error itself
    -}
    -
    -

    -The template function "join" calls strings.Join. -

    -

    -The template function "context" returns the build context, defined as: -

    -
    type Context struct {
    -	GOARCH        string   // target architecture
    -	GOOS          string   // target operating system
    -	GOROOT        string   // Go root
    -	GOPATH        string   // Go path
    -	CgoEnabled    bool     // whether cgo can be used
    -	UseAllFiles   bool     // use files regardless of +build lines, file names
    -	Compiler      string   // compiler to assume when computing target paths
    -	BuildTags     []string // build constraints to match in +build lines
    -	ReleaseTags   []string // releases the current release is compatible with
    -	InstallSuffix string   // suffix to use in the name of the install dir
    -}
    -
    -

    -For more information about the meaning of these fields see the documentation -for the go/build package's Context type. -

    -

    -The -json flag causes the package data to be printed in JSON format -instead of using the template format. -

    -

    -The -e flag changes the handling of erroneous packages, those that -cannot be found or are malformed. By default, the list command -prints an error to standard error for each erroneous package and -omits the packages from consideration during the usual printing. -With the -e flag, the list command never prints errors to standard -error and instead processes the erroneous packages with the usual -printing. Erroneous packages will have a non-empty ImportPath and -a non-nil Error field; other information may or may not be missing -(zeroed). -

    -

    -For more about build flags, see 'go help build'. -

    -

    -For more about specifying packages, see 'go help packages'. -

    -

    Compile and run Go program

    -

    -Usage: -

    -
    go run [build flags] [-exec xprog] gofiles... [arguments...]
    -
    -

    -Run compiles and runs the main package comprising the named Go source files. -A Go source file is defined to be a file ending in a literal ".go" suffix. -

    -

    -By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. -If the -exec flag is given, 'go run' invokes the binary using xprog: -

    -
    'xprog a.out arguments...'.
    -
    -

    -If the -exec flag is not given, GOOS or GOARCH is different from the system -default, and a program named go_$GOOS_$GOARCH_exec can be found -on the current search path, 'go run' invokes the binary using that program, -for example 'go_nacl_386_exec a.out arguments...'. This allows execution of -cross-compiled programs when a simulator or other execution method is -available. -

    -

    -For more about build flags, see 'go help build'. -

    -

    -See also: go build. -

    -

    Test packages

    -

    -Usage: -

    -
    go test [build/test flags] [packages] [build/test flags & test binary flags]
    -
    -

    -'Go test' automates testing the packages named by the import paths. -It prints a summary of the test results in the format: -

    -
    ok   archive/tar   0.011s
    -FAIL archive/zip   0.022s
    -ok   compress/gzip 0.033s
    -...
    -
    -

    -followed by detailed output for each failed package. -

    -

    -'Go test' recompiles each package along with any files with names matching -the file pattern "*_test.go". -Files whose names begin with "_" (including "_test.go") or "." are ignored. -These additional files can contain test functions, benchmark functions, and -example functions. See 'go help testfunc' for more. -Each listed package causes the execution of a separate test binary. -

    -

    -Test files that declare a package with the suffix "_test" will be compiled as a -separate package, and then linked and run with the main test binary. -

    -

    -The go tool will ignore a directory named "testdata", making it available -to hold ancillary data needed by the tests. -

    -

    -By default, go test needs no arguments. It compiles and tests the package -with source in the current directory, including tests, and runs the tests. -

    -

    -The package is built in a temporary directory so it does not interfere with the -non-test installation. -

    -

    -In addition to the build flags, the flags handled by 'go test' itself are: -

    -
    -args
    -    Pass the remainder of the command line (everything after -args)
    -    to the test binary, uninterpreted and unchanged.
    -    Because this flag consumes the remainder of the command line,
    -    the package list (if present) must appear before this flag.
    -
    --c
    -    Compile the test binary to pkg.test but do not run it
    -    (where pkg is the last element of the package's import path).
    -    The file name can be changed with the -o flag.
    -
    --exec xprog
    -    Run the test binary using xprog. The behavior is the same as
    -    in 'go run'. See 'go help run' for details.
    -
    --i
    -    Install packages that are dependencies of the test.
    -    Do not run the test.
    -
    --o file
    -    Compile the test binary to the named file.
    -    The test still runs (unless -c or -i is specified).
    -
    -

    -The test binary also accepts flags that control execution of the test; these -flags are also accessible by 'go test'. See 'go help testflag' for details. -

    -

    -For more about build flags, see 'go help build'. -For more about specifying packages, see 'go help packages'. -

    -

    -See also: go build, go vet. -

    -

    Run specified go tool

    -

    -Usage: -

    -
    go tool [-n] command [args...]
    -
    -

    -Tool runs the go tool command identified by the arguments. -With no arguments it prints the list of known tools. -

    -

    -The -n flag causes tool to print the command that would be -executed but not execute it. -

    -

    -For more about each tool command, see 'go tool command -h'. -

    -

    Print Go version

    -

    -Usage: -

    -
    go version
    -
    -

    -Version prints the Go version, as reported by runtime.Version. -

    -

    Run go tool vet on packages

    -

    -Usage: -

    -
    go vet [-n] [-x] [build flags] [packages]
    -
    -

    -Vet runs the Go vet command on the packages named by the import paths. -

    -

    -For more about vet, see 'go doc cmd/vet'. -For more about specifying packages, see 'go help packages'. -

    -

    -To run the vet tool with specific options, run 'go tool vet'. -

    -

    -The -n flag prints commands that would be executed. -The -x flag prints commands as they are executed. -

    -

    -For more about build flags, see 'go help build'. -

    -

    -See also: go fmt, go fix. -

    -

    Calling between Go and C

    -

    -There are two different ways to call between Go and C/C++ code. -

    -

    -The first is the cgo tool, which is part of the Go distribution. For -information on how to use it see the cgo documentation (go doc cmd/cgo). -

    -

    -The second is the SWIG program, which is a general tool for -interfacing between languages. For information on SWIG see -http://swig.org/. When running go build, any file with a .swig -extension will be passed to SWIG. Any file with a .swigcxx extension -will be passed to SWIG with the -c++ option. -

    -

    -When either cgo or SWIG is used, go build will pass any .c, .m, .s, -or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ -compiler. The CC or CXX environment variables may be set to determine -the C or C++ compiler, respectively, to use. -

    -

    Description of build modes

    -

    -The 'go build' and 'go install' commands take a -buildmode argument which -indicates which kind of object file is to be built. Currently supported values -are: -

    -
    -buildmode=archive
    -	Build the listed non-main packages into .a files. Packages named
    -	main are ignored.
    -
    --buildmode=c-archive
    -	Build the listed main package, plus all packages it imports,
    -	into a C archive file. The only callable symbols will be those
    -	functions exported using a cgo //export comment. Requires
    -	exactly one main package to be listed.
    -
    --buildmode=c-shared
    -	Build the listed main packages, plus all packages that they
    -	import, into C shared libraries. The only callable symbols will
    -	be those functions exported using a cgo //export comment.
    -	Non-main packages are ignored.
    -
    --buildmode=default
    -	Listed main packages are built into executables and listed
    -	non-main packages are built into .a files (the default
    -	behavior).
    -
    --buildmode=shared
    -	Combine all the listed non-main packages into a single shared
    -	library that will be used when building with the -linkshared
    -	option. Packages named main are ignored.
    -
    --buildmode=exe
    -	Build the listed main packages and everything they import into
    -	executables. Packages not named main are ignored.
    -
    --buildmode=pie
    -	Build the listed main packages and everything they import into
    -	position independent executables (PIE). Packages not named
    -	main are ignored.
    -
    --buildmode=plugin
    -	Build the listed main packages, plus all packages that they
    -	import, into a Go plugin. Packages not named main are ignored.
    -
    -

    File types

    -

    -The go command examines the contents of a restricted set of files -in each directory. It identifies which files to examine based on -the extension of the file name. These extensions are: -

    -
    .go
    -	Go source files.
    -.c, .h
    -	C source files.
    -	If the package uses cgo or SWIG, these will be compiled with the
    -	OS-native compiler (typically gcc); otherwise they will
    -	trigger an error.
    -.cc, .cpp, .cxx, .hh, .hpp, .hxx
    -	C++ source files. Only useful with cgo or SWIG, and always
    -	compiled with the OS-native compiler.
    -.m
    -	Objective-C source files. Only useful with cgo, and always
    -	compiled with the OS-native compiler.
    -.s, .S
    -	Assembler source files.
    -	If the package uses cgo or SWIG, these will be assembled with the
    -	OS-native assembler (typically gcc (sic)); otherwise they
    -	will be assembled with the Go assembler.
    -.swig, .swigcxx
    -	SWIG definition files.
    -.syso
    -	System object files.
    -
    -

    -Files of each of these types except .syso may contain build -constraints, but the go command stops scanning for build constraints -at the first item in the file that is not a blank line or //-style -line comment. See the go/build package documentation for -more details. -

    -

    -Non-test Go source files can also include a //go:binary-only-package -comment, indicating that the package sources are included -for documentation only and must not be used to build the -package binary. This enables distribution of Go packages in -their compiled form alone. See the go/build package documentation -for more details. -

    -

    GOPATH environment variable

    -

    -The Go path is used to resolve import statements. -It is implemented by and documented in the go/build package. -

    -

    -The GOPATH environment variable lists places to look for Go code. -On Unix, the value is a colon-separated string. -On Windows, the value is a semicolon-separated string. -On Plan 9, the value is a list. -

    -

    -If the environment variable is unset, GOPATH defaults -to a subdirectory named "go" in the user's home directory -($HOME/go on Unix, %USERPROFILE%\go on Windows), -unless that directory holds a Go distribution. -Run "go env GOPATH" to see the current GOPATH. -

    -

    -See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. -

    -

    -Each directory listed in GOPATH must have a prescribed structure: -

    -

    -The src directory holds source code. The path below src -determines the import path or executable name. -

    -

    -The pkg directory holds installed package objects. -As in the Go tree, each target operating system and -architecture pair has its own subdirectory of pkg -(pkg/GOOS_GOARCH). -

    -

    -If DIR is a directory listed in the GOPATH, a package with -source in DIR/src/foo/bar can be imported as "foo/bar" and -has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". -

    -

    -The bin directory holds compiled commands. -Each command is named for its source directory, but only -the final element, not the entire path. That is, the -command with source in DIR/src/foo/quux is installed into -DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped -so that you can add DIR/bin to your PATH to get at the -installed commands. If the GOBIN environment variable is -set, commands are installed to the directory it names instead -of DIR/bin. GOBIN must be an absolute path. -

    -

    -Here's an example directory layout: -

    -
    GOPATH=/home/user/go
    -
    -/home/user/go/
    -    src/
    -        foo/
    -            bar/               (go code in package bar)
    -                x.go
    -            quux/              (go code in package main)
    -                y.go
    -    bin/
    -        quux                   (installed command)
    -    pkg/
    -        linux_amd64/
    -            foo/
    -                bar.a          (installed package object)
    -
    -

    -Go searches each directory listed in GOPATH to find source code, -but new packages are always downloaded into the first directory -in the list. -

    -

    -See https://golang.org/doc/code.html for an example. -

    -

    Internal Directories

    -

    -Code in or below a directory named "internal" is importable only -by code in the directory tree rooted at the parent of "internal". -Here's an extended version of the directory layout above: -

    -
    /home/user/go/
    -    src/
    -        crash/
    -            bang/              (go code in package bang)
    -                b.go
    -        foo/                   (go code in package foo)
    -            f.go
    -            bar/               (go code in package bar)
    -                x.go
    -            internal/
    -                baz/           (go code in package baz)
    -                    z.go
    -            quux/              (go code in package main)
    -                y.go
    -
    -

    -The code in z.go is imported as "foo/internal/baz", but that -import statement can only appear in source files in the subtree -rooted at foo. The source files foo/f.go, foo/bar/x.go, and -foo/quux/y.go can all import "foo/internal/baz", but the source file -crash/bang/b.go cannot. -

    -

    -See https://golang.org/s/go14internal for details. -

    -

    Vendor Directories

    -

    -Go 1.6 includes support for using local copies of external dependencies -to satisfy imports of those dependencies, often referred to as vendoring. -

    -

    -Code below a directory named "vendor" is importable only -by code in the directory tree rooted at the parent of "vendor", -and only using an import path that omits the prefix up to and -including the vendor element. -

    -

    -Here's the example from the previous section, -but with the "internal" directory renamed to "vendor" -and a new foo/vendor/crash/bang directory added: -

    -
    /home/user/go/
    -    src/
    -        crash/
    -            bang/              (go code in package bang)
    -                b.go
    -        foo/                   (go code in package foo)
    -            f.go
    -            bar/               (go code in package bar)
    -                x.go
    -            vendor/
    -                crash/
    -                    bang/      (go code in package bang)
    -                        b.go
    -                baz/           (go code in package baz)
    -                    z.go
    -            quux/              (go code in package main)
    -                y.go
    -
    -

    -The same visibility rules apply as for internal, but the code -in z.go is imported as "baz", not as "foo/vendor/baz". -

    -

    -Code in vendor directories deeper in the source tree shadows -code in higher directories. Within the subtree rooted at foo, an import -of "crash/bang" resolves to "foo/vendor/crash/bang", not the -top-level "crash/bang". -

    -

    -Code in vendor directories is not subject to import path -checking (see 'go help importpath'). -

    -

    -When 'go get' checks out or updates a git repository, it now also -updates submodules. -

    -

    -Vendor directories do not affect the placement of new repositories -being checked out for the first time by 'go get': those are always -placed in the main GOPATH, never in a vendor subtree. -

    -

    -See https://golang.org/s/go15vendor for details. -

    -

    Environment variables

    -

    -The go command, and the tools it invokes, examine a few different -environment variables. For many of these, you can see the default -value of on your system by running 'go env NAME', where NAME is the -name of the variable. -

    -

    -General-purpose environment variables: -

    -
    GCCGO
    -	The gccgo command to run for 'go build -compiler=gccgo'.
    -GOARCH
    -	The architecture, or processor, for which to compile code.
    -	Examples are amd64, 386, arm, ppc64.
    -GOBIN
    -	The directory where 'go install' will install a command.
    -GOOS
    -	The operating system for which to compile code.
    -	Examples are linux, darwin, windows, netbsd.
    -GOPATH
    -	For more details see: 'go help gopath'.
    -GORACE
    -	Options for the race detector.
    -	See https://golang.org/doc/articles/race_detector.html.
    -GOROOT
    -	The root of the go tree.
    -
    -

    -Environment variables for use with cgo: -

    -
    CC
    -	The command to use to compile C code.
    -CGO_ENABLED
    -	Whether the cgo command is supported.  Either 0 or 1.
    -CGO_CFLAGS
    -	Flags that cgo will pass to the compiler when compiling
    -	C code.
    -CGO_CPPFLAGS
    -	Flags that cgo will pass to the compiler when compiling
    -	C or C++ code.
    -CGO_CXXFLAGS
    -	Flags that cgo will pass to the compiler when compiling
    -	C++ code.
    -CGO_FFLAGS
    -	Flags that cgo will pass to the compiler when compiling
    -	Fortran code.
    -CGO_LDFLAGS
    -	Flags that cgo will pass to the compiler when linking.
    -CXX
    -	The command to use to compile C++ code.
    -PKG_CONFIG
    -	Path to pkg-config tool.
    -
    -

    -Architecture-specific environment variables: -

    -
    GOARM
    -	For GOARCH=arm, the ARM architecture for which to compile.
    -	Valid values are 5, 6, 7.
    -GO386
    -	For GOARCH=386, the floating point instruction set.
    -	Valid values are 387, sse2.
    -
    -

    -Special-purpose environment variables: -

    -
    GOROOT_FINAL
    -	The root of the installed Go tree, when it is
    -	installed in a location other than where it is built.
    -	File names in stack traces are rewritten from GOROOT to
    -	GOROOT_FINAL.
    -GO_EXTLINK_ENABLED
    -	Whether the linker should use external linking mode
    -	when using -linkmode=auto with code that uses cgo.
    -	Set to 0 to disable external linking mode, 1 to enable it.
    -GIT_ALLOW_PROTOCOL
    -	Defined by Git. A colon-separated list of schemes that are allowed to be used
    -	with git fetch/clone. If set, any scheme not explicitly mentioned will be
    -	considered insecure by 'go get'.
    -
    -

    Import path syntax

    -

    -An import path (see 'go help packages') denotes a package stored in the local -file system. In general, an import path denotes either a standard package (such -as "unicode/utf8") or a package found in one of the work spaces (For more -details see: 'go help gopath'). -

    -

    Relative import paths

    -

    -An import path beginning with ./ or ../ is called a relative path. -The toolchain supports relative import paths as a shortcut in two ways. -

    -

    -First, a relative path can be used as a shorthand on the command line. -If you are working in the directory containing the code imported as -"unicode" and want to run the tests for "unicode/utf8", you can type -"go test ./utf8" instead of needing to specify the full path. -Similarly, in the reverse situation, "go test .." will test "unicode" from -the "unicode/utf8" directory. Relative patterns are also allowed, like -"go test ./..." to test all subdirectories. See 'go help packages' for details -on the pattern syntax. -

    -

    -Second, if you are compiling a Go program not in a work space, -you can use a relative path in an import statement in that program -to refer to nearby code also not in a work space. -This makes it easy to experiment with small multipackage programs -outside of the usual work spaces, but such programs cannot be -installed with "go install" (there is no work space in which to install them), -so they are rebuilt from scratch each time they are built. -To avoid ambiguity, Go programs cannot use relative import paths -within a work space. -

    -

    Remote import paths

    -

    -Certain import paths also -describe how to obtain the source code for the package using -a revision control system. -

    -

    -A few common code hosting sites have special syntax: -

    -
    Bitbucket (Git, Mercurial)
    -
    -	import "bitbucket.org/user/project"
    -	import "bitbucket.org/user/project/sub/directory"
    -
    -GitHub (Git)
    -
    -	import "github.com/user/project"
    -	import "github.com/user/project/sub/directory"
    -
    -Launchpad (Bazaar)
    -
    -	import "launchpad.net/project"
    -	import "launchpad.net/project/series"
    -	import "launchpad.net/project/series/sub/directory"
    -
    -	import "launchpad.net/~user/project/branch"
    -	import "launchpad.net/~user/project/branch/sub/directory"
    -
    -IBM DevOps Services (Git)
    -
    -	import "hub.jazz.net/git/user/project"
    -	import "hub.jazz.net/git/user/project/sub/directory"
    -
    -

    -For code hosted on other servers, import paths may either be qualified -with the version control type, or the go tool can dynamically fetch -the import path over https/http and discover where the code resides -from a <meta> tag in the HTML. -

    -

    -To declare the code location, an import path of the form -

    -
    repository.vcs/path
    -
    -

    -specifies the given repository, with or without the .vcs suffix, -using the named version control system, and then the path inside -that repository. The supported version control systems are: -

    -
    Bazaar      .bzr
    -Git         .git
    -Mercurial   .hg
    -Subversion  .svn
    -
    -

    -For example, -

    -
    import "example.org/user/foo.hg"
    -
    -

    -denotes the root directory of the Mercurial repository at -example.org/user/foo or foo.hg, and -

    -
    import "example.org/repo.git/foo/bar"
    -
    -

    -denotes the foo/bar directory of the Git repository at -example.org/repo or repo.git. -

    -

    -When a version control system supports multiple protocols, -each is tried in turn when downloading. For example, a Git -download tries https://, then git+ssh://. -

    -

    -By default, downloads are restricted to known secure protocols -(e.g. https, ssh). To override this setting for Git downloads, the -GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: -'go help environment'). -

    -

    -If the import path is not a known code hosting site and also lacks a -version control qualifier, the go tool attempts to fetch the import -over https/http and looks for a <meta> tag in the document's HTML -<head>. -

    -

    -The meta tag has the form: -

    -
    <meta name="go-import" content="import-prefix vcs repo-root">
    -
    -

    -The import-prefix is the import path corresponding to the repository -root. It must be a prefix or an exact match of the package being -fetched with "go get". If it's not an exact match, another http -request is made at the prefix to verify the <meta> tags match. -

    -

    -The meta tag should appear as early in the file as possible. -In particular, it should appear before any raw JavaScript or CSS, -to avoid confusing the go command's restricted parser. -

    -

    -The vcs is one of "git", "hg", "svn", etc, -

    -

    -The repo-root is the root of the version control repository. -It must include a scheme and must not contain a .vcs qualifier. -

    -

    -For example, -

    -
    import "example.org/pkg/foo"
    -
    -

    -will result in the following requests: -

    -
    https://example.org/pkg/foo?go-get=1 (preferred)
    -http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
    -
    -

    -If that page contains the meta tag -

    -
    <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
    -
    -

    -the go tool will verify that https://example.org/?go-get=1 contains the -same meta tag and then git clone https://code.org/r/p/exproj into -GOPATH/src/example.org. -
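As a hedged illustration (not part of the original patch, reusing the hypothetical example.org/code.org names from the example above): the go tool only needs that <meta> tag in the body of the ?go-get=1 response, so a vanity-import endpoint can be a few lines of Go:

```
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// 'go get' fetches <import-path>?go-get=1 and scans the HTML for a go-import meta tag.
		if r.FormValue("go-get") == "1" {
			fmt.Fprint(w, `<html><head><meta name="go-import" content="example.org git https://code.org/r/p/exproj"></head><body></body></html>`)
			return
		}
		http.NotFound(w, r)
	})
	// Real deployments must serve this over HTTPS; plain HTTP is only tried with -insecure.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```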

    -

    -New downloaded packages are written to the first directory listed in the GOPATH -environment variable (For more details see: 'go help gopath'). -

    -

    -The go command attempts to download the version of the -package appropriate for the Go release being used. -Run 'go help get' for more. -

    -

    Import path checking

    -

    -When the custom import path feature described above redirects to a -known code hosting site, each of the resulting packages has two possible -import paths, using the custom domain or the known hosting site. -

    -

    -A package statement is said to have an "import comment" if it is immediately -followed (before the next newline) by a comment of one of these two forms: -

    -
    package math // import "path"
    -package math /* import "path" */
    -
    -

    -The go command will refuse to install a package with an import comment -unless it is being referred to by that import path. In this way, import comments -let package authors make sure the custom import path is used and not a -direct path to the underlying code hosting site. -

    -

    -Import path checking is disabled for code found within vendor trees. -This makes it possible to copy code into alternate locations in vendor trees -without needing to update import comments. -

    -

    -See https://golang.org/s/go14customimport for details. -

    -

    Description of package lists

    -

    -Many commands apply to a set of packages: -

    -
    go action [packages]
    -
    -

    -Usually, [packages] is a list of import paths. -

    -

    -An import path that is a rooted path or that begins with -a . or .. element is interpreted as a file system path and -denotes the package in that directory. -

    -

    -Otherwise, the import path P denotes the package found in -the directory DIR/src/P for some DIR listed in the GOPATH -environment variable (For more details see: 'go help gopath'). -

    -

    -If no import paths are given, the action applies to the -package in the current directory. -

    -

    -There are four reserved names for paths that should not be used -for packages to be built with the go tool: -

    -

    -- "main" denotes the top-level package in a stand-alone executable. -

    -

    -- "all" expands to all package directories found in all the GOPATH -trees. For example, 'go list all' lists all the packages on the local -system. -

    -

    -- "std" is like all but expands to just the packages in the standard -Go library. -

    -

    -- "cmd" expands to the Go repository's commands and their -internal libraries. -

    -

    -Import paths beginning with "cmd/" only match source code in -the Go repository. -

    -

    -An import path is a pattern if it includes one or more "..." wildcards, -each of which can match any string, including the empty string and -strings containing slashes. Such a pattern expands to all package -directories found in the GOPATH trees with names matching the -patterns. As a special case, x/... matches x as well as x's subdirectories. -For example, net/... expands to net and packages in its subdirectories. -

    -

    -An import path can also name a package to be downloaded from -a remote repository. Run 'go help importpath' for details. -

    -

    -Every package in a program must have a unique import path. -By convention, this is arranged by starting each path with a -unique prefix that belongs to you. For example, paths used -internally at Google all begin with 'google', and paths -denoting remote repositories begin with the path to the code, -such as 'github.com/user/repo'. -

    -

    -Packages in a program need not have unique package names, -but there are two reserved package names with special meaning. -The name main indicates a command, not a library. -Commands are built into binaries and cannot be imported. -The name documentation indicates documentation for -a non-Go program in the directory. Files in package documentation -are ignored by the go command. -

    -

    -As a special case, if the package list is a list of .go files from a -single directory, the command is applied to a single synthesized -package made up of exactly those files, ignoring any build constraints -in those files and ignoring any other files in the directory. -

    -

    -Directory and file names that begin with "." or "_" are ignored -by the go tool, as are directories named "testdata". -

    -

    Description of testing flags

    -

    -The 'go test' command takes both flags that apply to 'go test' itself -and flags that apply to the resulting test binary. -

    -

    -Several of the flags control profiling and write an execution profile -suitable for "go tool pprof"; run "go tool pprof -h" for more -information. The --alloc_space, --alloc_objects, and --show_bytes -options of pprof control how the information is presented. -

    -

    -The following flags are recognized by the 'go test' command and -control the execution of any test: -

    -
    -bench regexp
    -    Run (sub)benchmarks matching a regular expression.
    -    The given regular expression is split into smaller ones by
    -    top-level '/', where each must match the corresponding part of a
    -    benchmark's identifier.
    -    By default, no benchmarks run. To run all benchmarks,
    -    use '-bench .' or '-bench=.'.
    -
    --benchtime t
    -    Run enough iterations of each benchmark to take t, specified
    -    as a time.Duration (for example, -benchtime 1h30s).
    -    The default is 1 second (1s).
    -
    --count n
    -    Run each test and benchmark n times (default 1).
    -    If -cpu is set, run n times for each GOMAXPROCS value.
    -    Examples are always run once.
    -
    --cover
    -    Enable coverage analysis.
    -
    --covermode set,count,atomic
    -    Set the mode for coverage analysis for the package[s]
    -    being tested. The default is "set" unless -race is enabled,
    -    in which case it is "atomic".
    -    The values:
    -	set: bool: does this statement run?
    -	count: int: how many times does this statement run?
    -	atomic: int: count, but correct in multithreaded tests;
    -		significantly more expensive.
    -    Sets -cover.
    -
    --coverpkg pkg1,pkg2,pkg3
    -    Apply coverage analysis in each test to the given list of packages.
    -    The default is for each test to analyze only the package being tested.
    -    Packages are specified as import paths.
    -    Sets -cover.
    -
    --cpu 1,2,4
    -    Specify a list of GOMAXPROCS values for which the tests or
    -    benchmarks should be executed.  The default is the current value
    -    of GOMAXPROCS.
    -
    --parallel n
    -    Allow parallel execution of test functions that call t.Parallel.
    -    The value of this flag is the maximum number of tests to run
    -    simultaneously; by default, it is set to the value of GOMAXPROCS.
    -    Note that -parallel only applies within a single test binary.
    -    The 'go test' command may run tests for different packages
    -    in parallel as well, according to the setting of the -p flag
    -    (see 'go help build').
    -
    --run regexp
    -    Run only those tests and examples matching the regular expression.
    -    For tests the regular expression is split into smaller ones by
    -    top-level '/', where each must match the corresponding part of a
    -    test's identifier.
    -
    --short
    -    Tell long-running tests to shorten their run time.
    -    It is off by default but set during all.bash so that installing
    -    the Go tree can run a sanity check but not spend time running
    -    exhaustive tests.
    -
    --timeout t
    -    If a test runs longer than t, panic.
    -    The default is 10 minutes (10m).
    -
    --v
    -    Verbose output: log all tests as they are run. Also print all
    -    text from Log and Logf calls even if the test succeeds.
    -
    -
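For instance (a minimal sketch, not from the original sources), the -short and -parallel flags above are honored by test code through the testing package:

```
package example_test

import (
	"testing"
	"time"
)

func TestSlowPath(t *testing.T) {
	t.Parallel() // may run alongside other parallel tests, up to the -parallel limit
	if testing.Short() {
		t.Skip("skipping slow test in -short mode")
	}
	time.Sleep(2 * time.Second) // stand-in for expensive work
}
```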

    -The following flags are also recognized by 'go test' and can be used to -profile the tests during execution: -

    -
    -benchmem
    -    Print memory allocation statistics for benchmarks.
    -
    --blockprofile block.out
    -    Write a goroutine blocking profile to the specified file
    -    when all tests are complete.
    -    Writes test binary as -c would.
    -
    --blockprofilerate n
    -    Control the detail provided in goroutine blocking profiles by
    -    calling runtime.SetBlockProfileRate with n.
    -    See 'go doc runtime.SetBlockProfileRate'.
    -    The profiler aims to sample, on average, one blocking event every
    -    n nanoseconds the program spends blocked.  By default,
    -    if -test.blockprofile is set without this flag, all blocking events
    -    are recorded, equivalent to -test.blockprofilerate=1.
    -
    --coverprofile cover.out
    -    Write a coverage profile to the file after all tests have passed.
    -    Sets -cover.
    -
    --cpuprofile cpu.out
    -    Write a CPU profile to the specified file before exiting.
    -    Writes test binary as -c would.
    -
    --memprofile mem.out
    -    Write a memory profile to the file after all tests have passed.
    -    Writes test binary as -c would.
    -
    --memprofilerate n
    -    Enable more precise (and expensive) memory profiles by setting
    -    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
    -    To profile all memory allocations, use -test.memprofilerate=1
    -    and pass --alloc_space flag to the pprof tool.
    -
    --mutexprofile mutex.out
    -    Write a mutex contention profile to the specified file
    -    when all tests are complete.
    -    Writes test binary as -c would.
    -
    --mutexprofilefraction n
    -    Sample 1 in n stack traces of goroutines holding a
    -    contended mutex.
    -
    --outputdir directory
    -    Place output files from profiling in the specified directory,
    -    by default the directory in which "go test" is running.
    -
    --trace trace.out
    -    Write an execution trace to the specified file before exiting.
    -
    -
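As a hedged illustration (file names are arbitrary), a profiling run might combine these flags and then inspect the result with pprof:

	go test -bench . -cpuprofile cpu.out -memprofile mem.out
	go tool pprof cpu.out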

    -Each of these flags is also recognized with an optional 'test.' prefix, -as in -test.v. When invoking the generated test binary (the result of -'go test -c') directly, however, the prefix is mandatory. -

    -

    -The 'go test' command rewrites or removes recognized flags, -as appropriate, both before and after the optional package list, -before invoking the test binary. -

    -

    -For instance, the command -

    -
    go test -v -myflag testdata -cpuprofile=prof.out -x
    -
    -

    -will compile the test binary and then run it as -

    -
    pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
    -
    -

    -(The -x flag is removed because it applies only to the go command's -execution, not to the test itself.) -

    -

    -The test flags that generate profiles (other than for coverage) also -leave the test binary in pkg.test for use when analyzing the profiles. -

    -

    -When 'go test' runs a test binary, it does so from within the -corresponding package's source code directory. Depending on the test, -it may be necessary to do the same when invoking a generated test -binary directly. -

    -

    -The command-line package list, if present, must appear before any -flag not known to the go test command. Continuing the example above, -the package list would have to appear before -myflag, but could appear -on either side of -v. -

    -

    -To keep an argument for a test binary from being interpreted as a -known flag or a package name, use -args (see 'go help test') which -passes the remainder of the command line through to the test binary -uninterpreted and unaltered. -

    -

    -For instance, the command -

    -
    go test -v -args -x -v
    -
    -

    -will compile the test binary and then run it as -

    -
    pkg.test -test.v -x -v
    -
    -

    -Similarly, -

    -
    go test -args math
    -
    -

    -will compile the test binary and then run it as -

    -
    pkg.test math
    -
    -

    -In the first example, the -x and the second -v are passed through to the -test binary unchanged and with no effect on the go command itself. -In the second example, the argument math is passed through to the test -binary, instead of being interpreted as the package list. -

    -

    Description of testing functions

    -

    -The 'go test' command expects to find test, benchmark, and example functions -in the "*_test.go" files corresponding to the package under test. -

    -

    -A test function is one named TestXXX (where XXX is any alphanumeric string -not starting with a lower case letter) and should have the signature, -

    -
    func TestXXX(t *testing.T) { ... }
    -
    -

    -A benchmark function is one named BenchmarkXXX and should have the signature, -

    -
    func BenchmarkXXX(b *testing.B) { ... }
    -
    -
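A minimal sketch (illustrative names, not from the original sources) of how such functions typically look in a *_test.go file:

```
package example_test

import "testing"

// sum is a trivial function under test, included only to make the file self-contained.
func sum(a, b int) int { return a + b }

func TestSum(t *testing.T) {
	if got := sum(2, 3); got != 5 {
		t.Errorf("sum(2, 3) = %d; want 5", got)
	}
}

func BenchmarkSum(b *testing.B) {
	for i := 0; i < b.N; i++ { // the testing framework chooses b.N
		sum(2, 3)
	}
}
```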

    -An example function is similar to a test function but, instead of using -*testing.T to report success or failure, prints output to os.Stdout. -If the last comment in the function starts with "Output:" then the output -is compared exactly against the comment (see examples below). If the last -comment begins with "Unordered output:" then the output is compared to the -comment; however, the order of the lines is ignored. An example with no such -comment is compiled but not executed. An example with no text after -"Output:" is compiled, executed, and expected to produce no output. -

    -

    -Godoc displays the body of ExampleXXX to demonstrate the use -of the function, constant, or variable XXX. An example of a method M with -receiver type T or *T is named ExampleT_M. There may be multiple examples -for a given function, constant, or variable, distinguished by a trailing _xxx, -where xxx is a suffix not beginning with an upper case letter. -

    -

    -Here is an example of an example: -

    -
    func ExamplePrintln() {
    -	Println("The output of\nthis example.")
    -	// Output: The output of
    -	// this example.
    -}
    -
    -

    -Here is another example where the ordering of the output is ignored: -

    -
    func ExamplePerm() {
    -	for _, value := range Perm(4) {
    -		fmt.Println(value)
    -	}
    -
    -	// Unordered output: 4
    -	// 2
    -	// 1
    -	// 3
    -	// 0
    -}
    -
    -

    -The entire test file is presented as the example when it contains a single -example function, at least one other function, type, variable, or constant -declaration, and no test or benchmark functions. -

    -

    -See the documentation of the testing package for more information. -

    - - - -
    -
    - - - - - - - - -`)) diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md deleted file mode 100644 index fb5c5efb..00000000 --- a/vendor/golang.org/x/net/http2/h2i/README.md +++ /dev/null @@ -1,97 +0,0 @@ -# h2i - -**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' -days of telnetting to your HTTP/1.n servers? We're bringing you -back. - -Features: -- send raw HTTP/2 frames - - PING - - SETTINGS - - HEADERS - - etc -- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 -- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) -- tab completion of commands, options - -Not yet features, but soon: -- unnecessary CONTINUATION frames on short boundaries, to test peer implementations -- request bodies (DATA frames) -- send invalid frames for testing server implementations (supported by underlying Framer) - -Later: -- act like a server - -## Installation - -``` -$ go get golang.org/x/net/http2/h2i -$ h2i -``` - -## Demo - -``` -$ h2i -Usage: h2i - - -insecure - Whether to skip TLS cert validation - -nextproto string - Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") - -$ h2i google.com -Connecting to google.com:443 ... -Connected to 74.125.224.41:443 -Negotiated protocol "h2-14" -[FrameHeader SETTINGS len=18] - [MAX_CONCURRENT_STREAMS = 100] - [INITIAL_WINDOW_SIZE = 1048576] - [MAX_FRAME_SIZE = 16384] -[FrameHeader WINDOW_UPDATE len=4] - Window-Increment = 983041 - -h2i> PING h2iSayHI -[FrameHeader PING flags=ACK len=8] - Data = "h2iSayHI" -h2i> headers -(as HTTP/1.1)> GET / HTTP/1.1 -(as HTTP/1.1)> Host: ip.appspot.com -(as HTTP/1.1)> User-Agent: h2i/brad-n-blake -(as HTTP/1.1)> -Opening Stream-ID 1: - :authority = ip.appspot.com - :method = GET - :path = / - :scheme = https - user-agent = h2i/brad-n-blake -[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] - :status = "200" - alternate-protocol = "443:quic,p=1" - content-length = "15" - content-type = "text/html" - date = "Fri, 01 May 2015 23:06:56 GMT" - server = "Google Frontend" -[FrameHeader DATA flags=END_STREAM stream=1 len=15] - "173.164.155.78\n" -[FrameHeader PING len=8] - Data = "\x00\x00\x00\x00\x00\x00\x00\x00" -h2i> ping -[FrameHeader PING flags=ACK len=8] - Data = "h2i_ping" -h2i> ping -[FrameHeader PING flags=ACK len=8] - Data = "h2i_ping" -h2i> ping -[FrameHeader GOAWAY len=22] - Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) - -ReadFrame: EOF -``` - -## Status - -Quick few hour hack. So much yet to do. Feel free to file issues for -bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) -and I aren't yet accepting pull requests until things settle down. - diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go deleted file mode 100644 index 62e57527..00000000 --- a/vendor/golang.org/x/net/http2/h2i/h2i.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -/* -The h2i command is an interactive HTTP/2 console. 
- -Usage: - $ h2i [flags] - -Interactive commands in the console: (all parts case-insensitive) - - ping [data] - settings ack - settings FOO=n BAR=z - headers (open a new stream by typing HTTP/1.1) -*/ -package main - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "flag" - "fmt" - "io" - "log" - "net" - "net/http" - "os" - "regexp" - "strconv" - "strings" - - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -// Flags -var ( - flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") - flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") - flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.") - flagDial = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)") -) - -type command struct { - run func(*h2i, []string) error // required - - // complete optionally specifies tokens (case-insensitive) which are - // valid for this subcommand. - complete func() []string -} - -var commands = map[string]command{ - "ping": {run: (*h2i).cmdPing}, - "settings": { - run: (*h2i).cmdSettings, - complete: func() []string { - return []string{ - "ACK", - http2.SettingHeaderTableSize.String(), - http2.SettingEnablePush.String(), - http2.SettingMaxConcurrentStreams.String(), - http2.SettingInitialWindowSize.String(), - http2.SettingMaxFrameSize.String(), - http2.SettingMaxHeaderListSize.String(), - } - }, - }, - "quit": {run: (*h2i).cmdQuit}, - "headers": {run: (*h2i).cmdHeaders}, -} - -func usage() { - fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") - flag.PrintDefaults() -} - -// withPort adds ":443" if another port isn't already present. -func withPort(host string) string { - if _, _, err := net.SplitHostPort(host); err != nil { - return net.JoinHostPort(host, "443") - } - return host -} - -// withoutPort strips the port from addr if present. -func withoutPort(addr string) string { - if h, _, err := net.SplitHostPort(addr); err == nil { - return h - } - return addr -} - -// h2i is the app's state. 
-type h2i struct { - host string - tc *tls.Conn - framer *http2.Framer - term *terminal.Terminal - - // owned by the command loop: - streamID uint32 - hbuf bytes.Buffer - henc *hpack.Encoder - - // owned by the readFrames loop: - peerSetting map[http2.SettingID]uint32 - hdec *hpack.Decoder -} - -func main() { - flag.Usage = usage - flag.Parse() - if flag.NArg() != 1 { - usage() - os.Exit(2) - } - log.SetFlags(0) - - host := flag.Arg(0) - app := &h2i{ - host: host, - peerSetting: make(map[http2.SettingID]uint32), - } - app.henc = hpack.NewEncoder(&app.hbuf) - - if err := app.Main(); err != nil { - if app.term != nil { - app.logf("%v\n", err) - } else { - fmt.Fprintf(os.Stderr, "%v\n", err) - } - os.Exit(1) - } - fmt.Fprintf(os.Stdout, "\n") -} - -func (app *h2i) Main() error { - cfg := &tls.Config{ - ServerName: withoutPort(app.host), - NextProtos: strings.Split(*flagNextProto, ","), - InsecureSkipVerify: *flagInsecure, - } - - hostAndPort := *flagDial - if hostAndPort == "" { - hostAndPort = withPort(app.host) - } - log.Printf("Connecting to %s ...", hostAndPort) - tc, err := tls.Dial("tcp", hostAndPort, cfg) - if err != nil { - return fmt.Errorf("Error dialing %s: %v", hostAndPort, err) - } - log.Printf("Connected to %v", tc.RemoteAddr()) - defer tc.Close() - - if err := tc.Handshake(); err != nil { - return fmt.Errorf("TLS handshake: %v", err) - } - if !*flagInsecure { - if err := tc.VerifyHostname(app.host); err != nil { - return fmt.Errorf("VerifyHostname: %v", err) - } - } - state := tc.ConnectionState() - log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) - if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { - return fmt.Errorf("Could not negotiate protocol mutually") - } - - if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { - return err - } - - app.framer = http2.NewFramer(tc, tc) - - oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) - if err != nil { - return err - } - defer terminal.Restore(0, oldState) - - var screen = struct { - io.Reader - io.Writer - }{os.Stdin, os.Stdout} - - app.term = terminal.NewTerminal(screen, "h2i> ") - lastWord := regexp.MustCompile(`.+\W(\w+)$`) - app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { - if key != '\t' { - return - } - if pos != len(line) { - // TODO: we're being lazy for now, only supporting tab completion at the end. - return - } - // Auto-complete for the command itself. 
- if !strings.Contains(line, " ") { - var name string - name, _, ok = lookupCommand(line) - if !ok { - return - } - return name, len(name), true - } - _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) - if !ok || c.complete == nil { - return - } - if strings.HasSuffix(line, " ") { - app.logf("%s", strings.Join(c.complete(), " ")) - return line, pos, true - } - m := lastWord.FindStringSubmatch(line) - if m == nil { - return line, len(line), true - } - soFar := m[1] - var match []string - for _, cand := range c.complete() { - if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { - continue - } - match = append(match, cand) - } - if len(match) == 0 { - return - } - if len(match) > 1 { - // TODO: auto-complete any common prefix - app.logf("%s", strings.Join(match, " ")) - return line, pos, true - } - newLine = line[:len(line)-len(soFar)] + match[0] - return newLine, len(newLine), true - - } - - errc := make(chan error, 2) - go func() { errc <- app.readFrames() }() - go func() { errc <- app.readConsole() }() - return <-errc -} - -func (app *h2i) logf(format string, args ...interface{}) { - fmt.Fprintf(app.term, format+"\r\n", args...) -} - -func (app *h2i) readConsole() error { - if s := *flagSettings; s != "omit" { - var args []string - if s != "empty" { - args = strings.Split(s, ",") - } - _, c, ok := lookupCommand("settings") - if !ok { - panic("settings command not found") - } - c.run(app, args) - } - - for { - line, err := app.term.ReadLine() - if err == io.EOF { - return nil - } - if err != nil { - return fmt.Errorf("terminal.ReadLine: %v", err) - } - f := strings.Fields(line) - if len(f) == 0 { - continue - } - cmd, args := f[0], f[1:] - if _, c, ok := lookupCommand(cmd); ok { - err = c.run(app, args) - } else { - app.logf("Unknown command %q", line) - } - if err == errExitApp { - return nil - } - if err != nil { - return err - } - } -} - -func lookupCommand(prefix string) (name string, c command, ok bool) { - prefix = strings.ToLower(prefix) - if c, ok = commands[prefix]; ok { - return prefix, c, ok - } - - for full, candidate := range commands { - if strings.HasPrefix(full, prefix) { - if c.run != nil { - return "", command{}, false // ambiguous - } - c = candidate - name = full - } - } - return name, c, c.run != nil -} - -var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") - -func (a *h2i) cmdQuit(args []string) error { - if len(args) > 0 { - a.logf("the QUIT command takes no argument") - return nil - } - return errExitApp -} - -func (a *h2i) cmdSettings(args []string) error { - if len(args) == 1 && strings.EqualFold(args[0], "ACK") { - return a.framer.WriteSettingsAck() - } - var settings []http2.Setting - for _, arg := range args { - if strings.EqualFold(arg, "ACK") { - a.logf("Error: ACK must be only argument with the SETTINGS command") - return nil - } - eq := strings.Index(arg, "=") - if eq == -1 { - a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) - return nil - } - sid, ok := settingByName(arg[:eq]) - if !ok { - a.logf("Error: unknown setting name %q", arg[:eq]) - return nil - } - val, err := strconv.ParseUint(arg[eq+1:], 10, 32) - if err != nil { - a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) - return nil - } - settings = append(settings, http2.Setting{ - ID: sid, - Val: uint32(val), - }) - } - a.logf("Sending: %v", settings) - return a.framer.WriteSettings(settings...) 
-} - -func settingByName(name string) (http2.SettingID, bool) { - for _, sid := range [...]http2.SettingID{ - http2.SettingHeaderTableSize, - http2.SettingEnablePush, - http2.SettingMaxConcurrentStreams, - http2.SettingInitialWindowSize, - http2.SettingMaxFrameSize, - http2.SettingMaxHeaderListSize, - } { - if strings.EqualFold(sid.String(), name) { - return sid, true - } - } - return 0, false -} - -func (app *h2i) cmdPing(args []string) error { - if len(args) > 1 { - app.logf("invalid PING usage: only accepts 0 or 1 args") - return nil // nil means don't end the program - } - var data [8]byte - if len(args) == 1 { - copy(data[:], args[0]) - } else { - copy(data[:], "h2i_ping") - } - return app.framer.WritePing(false, data) -} - -func (app *h2i) cmdHeaders(args []string) error { - if len(args) > 0 { - app.logf("Error: HEADERS doesn't yet take arguments.") - // TODO: flags for restricting window size, to force CONTINUATION - // frames. - return nil - } - var h1req bytes.Buffer - app.term.SetPrompt("(as HTTP/1.1)> ") - defer app.term.SetPrompt("h2i> ") - for { - line, err := app.term.ReadLine() - if err != nil { - return err - } - h1req.WriteString(line) - h1req.WriteString("\r\n") - if line == "" { - break - } - } - req, err := http.ReadRequest(bufio.NewReader(&h1req)) - if err != nil { - app.logf("Invalid HTTP/1.1 request: %v", err) - return nil - } - if app.streamID == 0 { - app.streamID = 1 - } else { - app.streamID += 2 - } - app.logf("Opening Stream-ID %d:", app.streamID) - hbf := app.encodeHeaders(req) - if len(hbf) > 16<<10 { - app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") - return nil - } - return app.framer.WriteHeaders(http2.HeadersFrameParam{ - StreamID: app.streamID, - BlockFragment: hbf, - EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now - EndHeaders: true, // for now - }) -} - -func (app *h2i) readFrames() error { - for { - f, err := app.framer.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame: %v", err) - } - app.logf("%v", f) - switch f := f.(type) { - case *http2.PingFrame: - app.logf(" Data = %q", f.Data) - case *http2.SettingsFrame: - f.ForeachSetting(func(s http2.Setting) error { - app.logf(" %v", s) - app.peerSetting[s.ID] = s.Val - return nil - }) - case *http2.WindowUpdateFrame: - app.logf(" Window-Increment = %v", f.Increment) - case *http2.GoAwayFrame: - app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode) - case *http2.DataFrame: - app.logf(" %q", f.Data()) - case *http2.HeadersFrame: - if f.HasPriority() { - app.logf(" PRIORITY = %v", f.Priority) - } - if app.hdec == nil { - // TODO: if the user uses h2i to send a SETTINGS frame advertising - // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE - // and stuff here instead of using the 4k default. But for now: - tableSize := uint32(4 << 10) - app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) - } - app.hdec.Write(f.HeaderBlockFragment()) - case *http2.PushPromiseFrame: - if app.hdec == nil { - // TODO: if the user uses h2i to send a SETTINGS frame advertising - // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE - // and stuff here instead of using the 4k default. 
But for now: - tableSize := uint32(4 << 10) - app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) - } - app.hdec.Write(f.HeaderBlockFragment()) - } - } -} - -// called from readLoop -func (app *h2i) onNewHeaderField(f hpack.HeaderField) { - if f.Sensitive { - app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) - } - app.logf(" %s = %q", f.Name, f.Value) -} - -func (app *h2i) encodeHeaders(req *http.Request) []byte { - app.hbuf.Reset() - - // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go - host := req.Host - if host == "" { - host = req.URL.Host - } - - path := req.RequestURI - if path == "" { - path = "/" - } - - app.writeHeader(":authority", host) // probably not right for all sites - app.writeHeader(":method", req.Method) - app.writeHeader(":path", path) - app.writeHeader(":scheme", "https") - - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - if lowKey == "host" { - continue - } - for _, v := range vv { - app.writeHeader(lowKey, v) - } - } - return app.hbuf.Bytes() -} - -func (app *h2i) writeHeader(name, value string) { - app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) - app.logf(" %s = %s", name, value) -} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go deleted file mode 100644 index c2805f6a..00000000 --- a/vendor/golang.org/x/net/http2/headermap.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "net/http" - "strings" -) - -var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case -) - -func init() { - for _, v := range []string{ - "accept", - "accept-charset", - "accept-encoding", - "accept-language", - "accept-ranges", - "age", - "access-control-allow-origin", - "allow", - "authorization", - "cache-control", - "content-disposition", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-range", - "content-type", - "cookie", - "date", - "etag", - "expect", - "expires", - "from", - "host", - "if-match", - "if-modified-since", - "if-none-match", - "if-unmodified-since", - "last-modified", - "link", - "location", - "max-forwards", - "proxy-authenticate", - "proxy-authorization", - "range", - "referer", - "refresh", - "retry-after", - "server", - "set-cookie", - "strict-transport-security", - "trailer", - "transfer-encoding", - "user-agent", - "vary", - "via", - "www-authenticate", - } { - chk := http.CanonicalHeaderKey(v) - commonLowerHeader[chk] = v - commonCanonHeader[v] = chk - } -} - -func lowerHeader(v string) string { - if s, ok := commonLowerHeader[v]; ok { - return s - } - return strings.ToLower(v) -} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go deleted file mode 100644 index 54726c2a..00000000 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package hpack - -import ( - "io" -) - -const ( - uint32Max = ^uint32(0) - initialHeaderTableSize = 4096 -) - -type Encoder struct { - dynTab dynamicTable - // minSize is the minimum table size set by - // SetMaxDynamicTableSize after the previous Header Table Size - // Update. - minSize uint32 - // maxSizeLimit is the maximum table size this encoder - // supports. This will protect the encoder from too large - // size. - maxSizeLimit uint32 - // tableSizeUpdate indicates whether "Header Table Size - // Update" is required. - tableSizeUpdate bool - w io.Writer - buf []byte -} - -// NewEncoder returns a new Encoder which performs HPACK encoding. An -// encoded data is written to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{ - minSize: uint32Max, - maxSizeLimit: initialHeaderTableSize, - tableSizeUpdate: false, - w: w, - } - e.dynTab.table.init() - e.dynTab.setMaxSize(initialHeaderTableSize) - return e -} - -// WriteField encodes f into a single Write to e's underlying Writer. -// This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. -func (e *Encoder) WriteField(f HeaderField) error { - e.buf = e.buf[:0] - - if e.tableSizeUpdate { - e.tableSizeUpdate = false - if e.minSize < e.dynTab.maxSize { - e.buf = appendTableSize(e.buf, e.minSize) - } - e.minSize = uint32Max - e.buf = appendTableSize(e.buf, e.dynTab.maxSize) - } - - idx, nameValueMatch := e.searchTable(f) - if nameValueMatch { - e.buf = appendIndexed(e.buf, idx) - } else { - indexing := e.shouldIndex(f) - if indexing { - e.dynTab.add(f) - } - - if idx == 0 { - e.buf = appendNewName(e.buf, f, indexing) - } else { - e.buf = appendIndexedName(e.buf, f, idx, indexing) - } - } - n, err := e.w.Write(e.buf) - if err == nil && n != len(e.buf) { - err = io.ErrShortWrite - } - return err -} - -// searchTable searches f in both stable and dynamic header tables. -// The static header table is searched first. Only when there is no -// exact match for both name and value, the dynamic header table is -// then searched. If there is no match, i is 0. If both name and value -// match, i is the matched index and nameValueMatch becomes true. If -// only name matches, i points to that index and nameValueMatch -// becomes false. -func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - i, nameValueMatch = staticTable.search(f) - if nameValueMatch { - return i, true - } - - j, nameValueMatch := e.dynTab.table.search(f) - if nameValueMatch || (i == 0 && j != 0) { - return j + uint64(staticTable.len()), nameValueMatch - } - - return i, false -} - -// SetMaxDynamicTableSize changes the dynamic header table size to v. -// The actual size is bounded by the value passed to -// SetMaxDynamicTableSizeLimit. -func (e *Encoder) SetMaxDynamicTableSize(v uint32) { - if v > e.maxSizeLimit { - v = e.maxSizeLimit - } - if v < e.minSize { - e.minSize = v - } - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) -} - -// SetMaxDynamicTableSizeLimit changes the maximum value that can be -// specified in SetMaxDynamicTableSize to v. By default, it is set to -// 4096, which is the same size of the default dynamic header table -// size described in HPACK specification. If the current maximum -// dynamic header table size is strictly greater than v, "Header Table -// Size Update" will be done in the next WriteField call and the -// maximum dynamic header table size is truncated to v. 
-func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { - e.maxSizeLimit = v - if e.dynTab.maxSize > v { - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) - } -} - -// shouldIndex reports whether f should be indexed. -func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.Size() <= e.dynTab.maxSize -} - -// appendIndexed appends index i, as encoded in "Indexed Header Field" -// representation, to dst and returns the extended buffer. -func appendIndexed(dst []byte, i uint64) []byte { - first := len(dst) - dst = appendVarInt(dst, 7, i) - dst[first] |= 0x80 - return dst -} - -// appendNewName appends f, as encoded in one of "Literal Header field -// - New Name" representation variants, to dst and returns the -// extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Inremental Indexing" -// representation is used. -func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { - dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) - dst = appendHpackString(dst, f.Name) - return appendHpackString(dst, f.Value) -} - -// appendIndexedName appends f and index i referring indexed name -// entry, as encoded in one of "Literal Header field - Indexed Name" -// representation variants, to dst and returns the extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Incremental Indexing" -// representation is used. -func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { - first := len(dst) - var n byte - if indexing { - n = 6 - } else { - n = 4 - } - dst = appendVarInt(dst, n, i) - dst[first] |= encodeTypeByte(indexing, f.Sensitive) - return appendHpackString(dst, f.Value) -} - -// appendTableSize appends v, as encoded in "Header Table Size Update" -// representation, to dst and returns the extended buffer. -func appendTableSize(dst []byte, v uint32) []byte { - first := len(dst) - dst = appendVarInt(dst, 5, uint64(v)) - dst[first] |= 0x20 - return dst -} - -// appendVarInt appends i, as encoded in variable integer form using n -// bit prefix, to dst and returns the extended buffer. -// -// See -// http://http2.github.io/http2-spec/compression.html#integer.representation -func appendVarInt(dst []byte, n byte, i uint64) []byte { - k := uint64((1 << n) - 1) - if i < k { - return append(dst, byte(i)) - } - dst = append(dst, byte(k)) - i -= k - for ; i >= 128; i >>= 7 { - dst = append(dst, byte(0x80|(i&0x7f))) - } - return append(dst, byte(i)) -} - -// appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the the extended buffer. -// -// s will be encoded in Huffman codes only when it produces strictly -// shorter byte string. -func appendHpackString(dst []byte, s string) []byte { - huffmanLength := HuffmanEncodeLength(s) - if huffmanLength < uint64(len(s)) { - first := len(dst) - dst = appendVarInt(dst, 7, huffmanLength) - dst = AppendHuffmanString(dst, s) - dst[first] |= 0x80 - } else { - dst = appendVarInt(dst, 7, uint64(len(s))) - dst = append(dst, s...) - } - return dst -} - -// encodeTypeByte returns type byte. If sensitive is true, type byte -// for "Never Indexed" representation is returned. If sensitive is -// false and indexing is true, type byte for "Incremental Indexing" -// representation is returned. Otherwise, type byte for "Without -// Indexing" is returned. 
-func encodeTypeByte(indexing, sensitive bool) byte { - if sensitive { - return 0x10 - } - if indexing { - return 0x40 - } - return 0 -} diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go deleted file mode 100644 index 05f12db9..00000000 --- a/vendor/golang.org/x/net/http2/hpack/encode_test.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "bytes" - "encoding/hex" - "fmt" - "math/rand" - "reflect" - "strings" - "testing" -) - -func TestEncoderTableSizeUpdate(t *testing.T) { - tests := []struct { - size1, size2 uint32 - wantHex string - }{ - // Should emit 2 table size updates (2048 and 4096) - {2048, 4096, "3fe10f 3fe11f 82"}, - - // Should emit 1 table size update (2048) - {16384, 2048, "3fe10f 82"}, - } - for _, tt := range tests { - var buf bytes.Buffer - e := NewEncoder(&buf) - e.SetMaxDynamicTableSize(tt.size1) - e.SetMaxDynamicTableSize(tt.size2) - if err := e.WriteField(pair(":method", "GET")); err != nil { - t.Fatal(err) - } - want := removeSpace(tt.wantHex) - if got := hex.EncodeToString(buf.Bytes()); got != want { - t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) - } - } -} - -func TestEncoderWriteField(t *testing.T) { - var buf bytes.Buffer - e := NewEncoder(&buf) - var got []HeaderField - d := NewDecoder(4<<10, func(f HeaderField) { - got = append(got, f) - }) - - tests := []struct { - hdrs []HeaderField - }{ - {[]HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - }}, - {[]HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - pair("cache-control", "no-cache"), - }}, - {[]HeaderField{ - pair(":method", "GET"), - pair(":scheme", "https"), - pair(":path", "/index.html"), - pair(":authority", "www.example.com"), - pair("custom-key", "custom-value"), - }}, - } - for i, tt := range tests { - buf.Reset() - got = got[:0] - for _, hf := range tt.hdrs { - if err := e.WriteField(hf); err != nil { - t.Fatal(err) - } - } - _, err := d.Write(buf.Bytes()) - if err != nil { - t.Errorf("%d. Decoder Write = %v", i, err) - } - if !reflect.DeepEqual(got, tt.hdrs) { - t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs) - } - } -} - -func TestEncoderSearchTable(t *testing.T) { - e := NewEncoder(nil) - - e.dynTab.add(pair("foo", "bar")) - e.dynTab.add(pair("blake", "miz")) - e.dynTab.add(pair(":method", "GET")) - - tests := []struct { - hf HeaderField - wantI uint64 - wantMatch bool - }{ - // Name and Value match - {pair("foo", "bar"), uint64(staticTable.len()) + 3, true}, - {pair("blake", "miz"), uint64(staticTable.len()) + 2, true}, - {pair(":method", "GET"), 2, true}, - - // Only name match because Sensitive == true. This is allowed to match - // any ":method" entry. The current implementation uses the last entry - // added in newStaticTable. - {HeaderField{":method", "GET", true}, 3, false}, - - // Only Name matches - {pair("foo", "..."), uint64(staticTable.len()) + 3, false}, - {pair("blake", "..."), uint64(staticTable.len()) + 2, false}, - // As before, this is allowed to match any ":method" entry. 
- {pair(":method", "..."), 3, false}, - - // None match - {pair("foo-", "bar"), 0, false}, - } - for _, tt := range tests { - if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { - t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) - } - } -} - -func TestAppendVarInt(t *testing.T) { - tests := []struct { - n byte - i uint64 - want []byte - }{ - // Fits in a byte: - {1, 0, []byte{0}}, - {2, 2, []byte{2}}, - {3, 6, []byte{6}}, - {4, 14, []byte{14}}, - {5, 30, []byte{30}}, - {6, 62, []byte{62}}, - {7, 126, []byte{126}}, - {8, 254, []byte{254}}, - - // Multiple bytes: - {5, 1337, []byte{31, 154, 10}}, - } - for _, tt := range tests { - got := appendVarInt(nil, tt.n, tt.i) - if !bytes.Equal(got, tt.want) { - t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) - } - } -} - -func TestAppendHpackString(t *testing.T) { - tests := []struct { - s, wantHex string - }{ - // Huffman encoded - {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, - - // Not Huffman encoded - {"a", "01 61"}, - - // zero length - {"", "00"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendHpackString(nil, tt.s) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) - } - } -} - -func TestAppendIndexed(t *testing.T) { - tests := []struct { - i uint64 - wantHex string - }{ - // 1 byte - {1, "81"}, - {126, "fe"}, - - // 2 bytes - {127, "ff00"}, - {128, "ff01"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendIndexed(nil, tt.i) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) - } - } -} - -func TestAppendNewName(t *testing.T) { - tests := []struct { - f HeaderField - indexing bool - wantHex string - }{ - // Incremental indexing - {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - - // Without indexing - {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - - // Never indexed - {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendNewName(nil, tt.f, tt.indexing) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) - } - } -} - -func TestAppendIndexedName(t *testing.T) { - tests := []struct { - f HeaderField - i uint64 - indexing bool - wantHex string - }{ - // Incremental indexing - {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, - - // Without indexing - {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, - - // Never indexed - {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, - {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) - } - } -} - -func TestAppendTableSize(t *testing.T) { - tests := []struct { - i uint32 - wantHex string - }{ - // Fits into 1 
byte - {30, "3e"}, - - // Extra byte - {31, "3f00"}, - {32, "3f01"}, - } - for _, tt := range tests { - want := removeSpace(tt.wantHex) - buf := appendTableSize(nil, tt.i) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want) - } - } -} - -func TestEncoderSetMaxDynamicTableSize(t *testing.T) { - var buf bytes.Buffer - e := NewEncoder(&buf) - tests := []struct { - v uint32 - wantUpdate bool - wantMinSize uint32 - wantMaxSize uint32 - }{ - // Set new table size to 2048 - {2048, true, 2048, 2048}, - - // Set new table size to 16384, but still limited to - // 4096 - {16384, true, 2048, 4096}, - } - for _, tt := range tests { - e.SetMaxDynamicTableSize(tt.v) - if got := e.tableSizeUpdate; tt.wantUpdate != got { - t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate) - } - if got := e.minSize; tt.wantMinSize != got { - t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize) - } - if got := e.dynTab.maxSize; tt.wantMaxSize != got { - t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize) - } - } -} - -func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { - e := NewEncoder(nil) - // 4095 < initialHeaderTableSize means maxSize is truncated to - // 4095. - e.SetMaxDynamicTableSizeLimit(4095) - if got, want := e.dynTab.maxSize, uint32(4095); got != want { - t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) - } - if got, want := e.maxSizeLimit, uint32(4095); got != want { - t.Errorf("e.maxSizeLimit = %v; want %v", got, want) - } - if got, want := e.tableSizeUpdate, true; got != want { - t.Errorf("e.tableSizeUpdate = %v; want %v", got, want) - } - // maxSize will be truncated to maxSizeLimit - e.SetMaxDynamicTableSize(16384) - if got, want := e.dynTab.maxSize, uint32(4095); got != want { - t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) - } - // 8192 > current maxSizeLimit, so maxSize does not change. - e.SetMaxDynamicTableSizeLimit(8192) - if got, want := e.dynTab.maxSize, uint32(4095); got != want { - t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) - } - if got, want := e.maxSizeLimit, uint32(8192); got != want { - t.Errorf("e.maxSizeLimit = %v; want %v", got, want) - } -} - -func removeSpace(s string) string { - return strings.Replace(s, " ", "", -1) -} - -func BenchmarkEncoderSearchTable(b *testing.B) { - e := NewEncoder(nil) - - // A sample of possible header fields. - // This is not based on any actual data from HTTP/2 traces. - var possible []HeaderField - for _, f := range staticTable.ents { - if f.Value == "" { - possible = append(possible, f) - continue - } - // Generate 5 random values, except for cookie and set-cookie, - // which we know can have many values in practice. - num := 5 - if f.Name == "cookie" || f.Name == "set-cookie" { - num = 25 - } - for i := 0; i < num; i++ { - f.Value = fmt.Sprintf("%s-%d", f.Name, i) - possible = append(possible, f) - } - } - for k := 0; k < 10; k++ { - f := HeaderField{ - Name: fmt.Sprintf("x-header-%d", k), - Sensitive: rand.Int()%2 == 0, - } - for i := 0; i < 5; i++ { - f.Value = fmt.Sprintf("%s-%d", f.Name, i) - possible = append(possible, f) - } - } - - // Add a random sample to the dynamic table. This very loosely simulates - // a history of 100 requests with 20 header fields per request. - for r := 0; r < 100*20; r++ { - f := possible[rand.Int31n(int32(len(possible)))] - // Skip if this is in the staticTable verbatim. 
- if _, has := staticTable.search(f); !has { - e.dynTab.add(f) - } - } - - b.ResetTimer() - for n := 0; n < b.N; n++ { - for _, f := range possible { - e.searchTable(f) - } - } -} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go deleted file mode 100644 index 176644ac..00000000 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hpack implements HPACK, a compression format for -// efficiently representing HTTP header fields in the context of HTTP/2. -// -// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 -package hpack - -import ( - "bytes" - "errors" - "fmt" -) - -// A DecodingError is something the spec defines as a decoding error. -type DecodingError struct { - Err error -} - -func (de DecodingError) Error() string { - return fmt.Sprintf("decoding error: %v", de.Err) -} - -// An InvalidIndexError is returned when an encoder references a table -// entry before the static table or after the end of the dynamic table. -type InvalidIndexError int - -func (e InvalidIndexError) Error() string { - return fmt.Sprintf("invalid indexed representation index %d", int(e)) -} - -// A HeaderField is a name-value pair. Both the name and value are -// treated as opaque sequences of octets. -type HeaderField struct { - Name, Value string - - // Sensitive means that this header field should never be - // indexed. - Sensitive bool -} - -// IsPseudo reports whether the header field is an http2 pseudo header. -// That is, it reports whether it starts with a colon. -// It is not otherwise guaranteed to be a valid pseudo header field, -// though. -func (hf HeaderField) IsPseudo() bool { - return len(hf.Name) != 0 && hf.Name[0] == ':' -} - -func (hf HeaderField) String() string { - var suffix string - if hf.Sensitive { - suffix = " (sensitive)" - } - return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) -} - -// Size returns the size of an entry per RFC 7541 section 4.1. -func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 - // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's - // length in octets (as defined in Section 5.2), its value's - // length in octets (see Section 5.2), plus 32. The size of - // an entry is calculated using the length of the name and - // value without any Huffman encoding applied." - - // This can overflow if somebody makes a large HeaderField - // Name and/or Value by hand, but we don't care, because that - // won't happen on the wire because the encoding doesn't allow - // it. - return uint32(len(hf.Name) + len(hf.Value) + 32) -} - -// A Decoder is the decoding context for incremental processing of -// header blocks. -type Decoder struct { - dynTab dynamicTable - emit func(f HeaderField) - - emitEnabled bool // whether calls to emit are enabled - maxStrLen int // 0 means unlimited - - // buf is the unparsed buffer. It's only written to - // saveBuf if it was truncated in the middle of a header - // block. Because it's usually not owned, we can only - // process it under Write. - buf []byte // not owned; only valid during Write - - // saveBuf is previous data passed to Write which we weren't able - // to fully parse before. Unlike buf, we own this data. 
- saveBuf bytes.Buffer -} - -// NewDecoder returns a new decoder with the provided maximum dynamic -// table size. The emitFunc will be called for each valid field -// parsed, in the same goroutine as calls to Write, before Write returns. -func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { - d := &Decoder{ - emit: emitFunc, - emitEnabled: true, - } - d.dynTab.table.init() - d.dynTab.allowedMaxSize = maxDynamicTableSize - d.dynTab.setMaxSize(maxDynamicTableSize) - return d -} - -// ErrStringLength is returned by Decoder.Write when the max string length -// (as configured by Decoder.SetMaxStringLength) would be violated. -var ErrStringLength = errors.New("hpack: string too long") - -// SetMaxStringLength sets the maximum size of a HeaderField name or -// value string. If a string exceeds this length (even after any -// decompression), Write will return ErrStringLength. -// A value of 0 means unlimited and is the default from NewDecoder. -func (d *Decoder) SetMaxStringLength(n int) { - d.maxStrLen = n -} - -// SetEmitFunc changes the callback used when new header fields -// are decoded. -// It must be non-nil. It does not affect EmitEnabled. -func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { - d.emit = emitFunc -} - -// SetEmitEnabled controls whether the emitFunc provided to NewDecoder -// should be called. The default is true. -// -// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE -// while still decoding and keeping in-sync with decoder state, but -// without doing unnecessary decompression or generating unnecessary -// garbage for header fields past the limit. -func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } - -// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder -// are currently enabled. The default is true. -func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } - -// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their -// underlying buffers for garbage reasons. - -func (d *Decoder) SetMaxDynamicTableSize(v uint32) { - d.dynTab.setMaxSize(v) -} - -// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded -// stream (via dynamic table size updates) may set the maximum size -// to. -func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { - d.dynTab.allowedMaxSize = v -} - -type dynamicTable struct { - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - table headerFieldTable - size uint32 // in bytes - maxSize uint32 // current maxSize - allowedMaxSize uint32 // maxSize may go up to this, inclusive -} - -func (dt *dynamicTable) setMaxSize(v uint32) { - dt.maxSize = v - dt.evict() -} - -func (dt *dynamicTable) add(f HeaderField) { - dt.table.addEntry(f) - dt.size += f.Size() - dt.evict() -} - -// If we're too big, evict old stuff. -func (dt *dynamicTable) evict() { - var n int - for dt.size > dt.maxSize && n < dt.table.len() { - dt.size -= dt.table.ents[n].Size() - n++ - } - dt.table.evictOldest(n) -} - -func (d *Decoder) maxTableIndex() int { - // This should never overflow. RFC 7540 Section 6.5.2 limits the size of - // the dynamic table to 2^32 bytes, where each entry will occupy more than - // one byte. Further, the staticTable has a fixed, small length. - return d.dynTab.table.len() + staticTable.len() -} - -func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - // See Section 2.3.3. 
- if i == 0 { - return - } - if i <= uint64(staticTable.len()) { - return staticTable.ents[i-1], true - } - if i > uint64(d.maxTableIndex()) { - return - } - // In the dynamic table, newer entries have lower indices. - // However, dt.ents[0] is the oldest entry. Hence, dt.ents is - // the reversed dynamic table. - dt := d.dynTab.table - return dt.ents[dt.len()-(int(i)-staticTable.len())], true -} - -// Decode decodes an entire block. -// -// TODO: remove this method and make it incremental later? This is -// easier for debugging now. -func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { - var hf []HeaderField - saveFunc := d.emit - defer func() { d.emit = saveFunc }() - d.emit = func(f HeaderField) { hf = append(hf, f) } - if _, err := d.Write(p); err != nil { - return nil, err - } - if err := d.Close(); err != nil { - return nil, err - } - return hf, nil -} - -func (d *Decoder) Close() error { - if d.saveBuf.Len() > 0 { - d.saveBuf.Reset() - return DecodingError{errors.New("truncated headers")} - } - return nil -} - -func (d *Decoder) Write(p []byte) (n int, err error) { - if len(p) == 0 { - // Prevent state machine CPU attacks (making us redo - // work up to the point of finding out we don't have - // enough data) - return - } - // Only copy the data if we have to. Optimistically assume - // that p will contain a complete header block. - if d.saveBuf.Len() == 0 { - d.buf = p - } else { - d.saveBuf.Write(p) - d.buf = d.saveBuf.Bytes() - d.saveBuf.Reset() - } - - for len(d.buf) > 0 { - err = d.parseHeaderFieldRepr() - if err == errNeedMore { - // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string - // reading code earlier should already catch - // overlong things and return ErrStringLength, - // but keep this as a last resort. - const varIntOverhead = 8 // conservative - if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { - return 0, ErrStringLength - } - d.saveBuf.Write(d.buf) - return len(p), nil - } - if err != nil { - break - } - } - return len(p), err -} - -// errNeedMore is an internal sentinel error value that means the -// buffer is truncated and we need to read more data before we can -// continue parsing. -var errNeedMore = errors.New("need more data") - -type indexType int - -const ( - indexedTrue indexType = iota - indexedFalse - indexedNever -) - -func (v indexType) indexed() bool { return v == indexedTrue } -func (v indexType) sensitive() bool { return v == indexedNever } - -// returns errNeedMore if there isn't enough data available. -// any other error is fatal. -// consumes d.buf iff it returns nil. -// precondition: must be called with len(d.buf) > 0 -func (d *Decoder) parseHeaderFieldRepr() error { - b := d.buf[0] - switch { - case b&128 != 0: - // Indexed representation. - // High bit set? 
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 - return d.parseFieldIndexed() - case b&192 == 64: - // 6.2.1 Literal Header Field with Incremental Indexing - // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 - return d.parseFieldLiteral(6, indexedTrue) - case b&240 == 0: - // 6.2.2 Literal Header Field without Indexing - // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 - return d.parseFieldLiteral(4, indexedFalse) - case b&240 == 16: - // 6.2.3 Literal Header Field never Indexed - // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 - return d.parseFieldLiteral(4, indexedNever) - case b&224 == 32: - // 6.3 Dynamic Table Size Update - // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 - return d.parseDynamicTableSizeUpdate() - } - - return DecodingError{errors.New("invalid encoding")} -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldIndexed() error { - buf := d.buf - idx, buf, err := readVarInt(7, buf) - if err != nil { - return err - } - hf, ok := d.at(idx) - if !ok { - return DecodingError{InvalidIndexError(idx)} - } - d.buf = buf - return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { - buf := d.buf - nameIdx, buf, err := readVarInt(n, buf) - if err != nil { - return err - } - - var hf HeaderField - wantStr := d.emitEnabled || it.indexed() - if nameIdx > 0 { - ihf, ok := d.at(nameIdx) - if !ok { - return DecodingError{InvalidIndexError(nameIdx)} - } - hf.Name = ihf.Name - } else { - hf.Name, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - } - hf.Value, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - d.buf = buf - if it.indexed() { - d.dynTab.add(hf) - } - hf.Sensitive = it.sensitive() - return d.callEmit(hf) -} - -func (d *Decoder) callEmit(hf HeaderField) error { - if d.maxStrLen != 0 { - if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { - return ErrStringLength - } - } - if d.emitEnabled { - d.emit(hf) - } - return nil -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseDynamicTableSizeUpdate() error { - buf := d.buf - size, buf, err := readVarInt(5, buf) - if err != nil { - return err - } - if size > uint64(d.dynTab.allowedMaxSize) { - return DecodingError{errors.New("dynamic table size update too large")} - } - d.dynTab.setMaxSize(uint32(size)) - d.buf = buf - return nil -} - -var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} - -// readVarInt reads an unsigned variable length integer off the -// beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. -// -// n must always be between 1 and 8. -// -// The returned remain buffer is either a smaller suffix of p, or err != nil. -// The error is errNeedMore if p doesn't contain a complete integer. 
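The readVarInt decoder that follows reads the HPACK integer representation of RFC 7541 section 5.1: a value that fits in the n-bit prefix is stored there directly, anything larger fills the prefix and continues in 7-bit groups with a continuation bit. A minimal standalone encoder for that representation, as a sketch (appendPrefixInt is an illustrative name, not the package's unexported appendVarInt helper used by the tests further down):

package main

import "fmt"

// appendPrefixInt writes i using the HPACK integer representation with an
// n-bit prefix (RFC 7541, section 5.1). Illustrative sketch only; the hpack
// package keeps its own unexported helpers (appendVarInt/readVarInt).
func appendPrefixInt(dst []byte, n uint8, i uint64) []byte {
	k := (uint64(1) << n) - 1
	if i < k {
		return append(dst, byte(i)) // fits in the prefix
	}
	dst = append(dst, byte(k)) // prefix is all ones
	i -= k
	for i >= 128 {
		dst = append(dst, byte(i&0x7f)|0x80) // 7 bits plus continuation flag
		i >>= 7
	}
	return append(dst, byte(i))
}

func main() {
	// RFC 7541 C.1.2 encodes 1337 with a 5-bit prefix as 31, 154, 10;
	// readVarInt below turns those bytes back into 1337.
	fmt.Println(appendPrefixInt(nil, 5, 1337)) // [31 154 10]
	fmt.Println(appendPrefixInt(nil, 5, 10))   // [10]
}
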
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
-	if n < 1 || n > 8 {
-		panic("bad n")
-	}
-	if len(p) == 0 {
-		return 0, p, errNeedMore
-	}
-	i = uint64(p[0])
-	if n < 8 {
-		i &= (1 << uint64(n)) - 1
-	}
-	if i < (1<<uint64(n))-1 {
-		return i, p[1:], nil
-	}
-
-	// i = (1<<n) - 1
-	origP := p
-	p = p[1:]
-	var m uint64
-	for len(p) > 0 {
-		b := p[0]
-		p = p[1:]
-		i += uint64(b&127) << m
-		if b&128 == 0 {
-			return i, p, nil
-		}
-		m += 7
-		if m >= 63 { // TODO: proper overflow check. making this up.
-			return 0, origP, errVarintOverflow
-		}
-	}
-	return 0, origP, errNeedMore
-}
-
-// readString decodes an hpack string from p.
-//
-// wantStr is whether s will be used. If false, decompression and
-// []byte->string garbage are skipped if s will be ignored
-// anyway. This does mean that huffman decoding errors for non-indexed
-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
-// is returning an error anyway, and because they're not indexed, the error
-// won't affect the decoding state.
-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
-	if len(p) == 0 {
-		return "", p, errNeedMore
-	}
-	isHuff := p[0]&128 != 0
-	strLen, p, err := readVarInt(7, p)
-	if err != nil {
-		return "", p, err
-	}
-	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
-		return "", nil, ErrStringLength
-	}
-	if uint64(len(p)) < strLen {
-		return "", p, errNeedMore
-	}
-	if !isHuff {
-		if wantStr {
-			s = string(p[:strLen])
-		}
-		return s, p[strLen:], nil
-	}
-
-	if wantStr {
-		buf := bufPool.Get().(*bytes.Buffer)
-		buf.Reset() // don't trust others
-		defer bufPool.Put(buf)
-		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
-			buf.Reset()
-			return "", nil, err
-		}
-		s = buf.String()
-		buf.Reset() // be nice to GC
-	}
-	return s, p[strLen:], nil
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
deleted file mode 100644
index bc7f4767..00000000
--- a/vendor/golang.org/x/net/http2/hpack/hpack_test.go
+++ /dev/null
@@ -1,722 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
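Taken together, the hpack.go removed above exposes a small callback-driven API: NewEncoder and WriteField on the writing side, NewDecoder with an emit function plus Write and Close on the reading side. A minimal round trip through that API, assuming golang.org/x/net is still pulled in through go.mod after this switch away from vendoring, might look like this sketch; the hpack_test.go deleted next exercises the same API in-package.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a small header list into one header block.
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	for _, f := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: "user-agent", Value: "demo"},
	} {
		if err := enc.WriteField(f); err != nil {
			log.Fatal(err)
		}
	}

	// Decode it again; each parsed field reaches the emit callback before
	// Write returns, as the NewDecoder comment above describes.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}
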
- -package hpack - -import ( - "bytes" - "encoding/hex" - "fmt" - "math/rand" - "reflect" - "strings" - "testing" - "time" -) - -func (d *Decoder) mustAt(idx int) HeaderField { - if hf, ok := d.at(uint64(idx)); !ok { - panic(fmt.Sprintf("bogus index %d", idx)) - } else { - return hf - } -} - -func TestDynamicTableAt(t *testing.T) { - d := NewDecoder(4096, nil) - at := d.mustAt - if got, want := at(2), (pair(":method", "GET")); got != want { - t.Errorf("at(2) = %v; want %v", got, want) - } - d.dynTab.add(pair("foo", "bar")) - d.dynTab.add(pair("blake", "miz")) - if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want { - t.Errorf("at(dyn 1) = %v; want %v", got, want) - } - if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want { - t.Errorf("at(dyn 2) = %v; want %v", got, want) - } - if got, want := at(3), (pair(":method", "POST")); got != want { - t.Errorf("at(3) = %v; want %v", got, want) - } -} - -func TestDynamicTableSizeEvict(t *testing.T) { - d := NewDecoder(4096, nil) - if want := uint32(0); d.dynTab.size != want { - t.Fatalf("size = %d; want %d", d.dynTab.size, want) - } - add := d.dynTab.add - add(pair("blake", "eats pizza")) - if want := uint32(15 + 32); d.dynTab.size != want { - t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) - } - add(pair("foo", "bar")) - if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { - t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) - } - d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) - if want := uint32(6 + 32); d.dynTab.size != want { - t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) - } - if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want { - t.Errorf("at(dyn 1) = %v; want %v", got, want) - } - add(pair("long", strings.Repeat("x", 500))) - if want := uint32(0); d.dynTab.size != want { - t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) - } -} - -func TestDecoderDecode(t *testing.T) { - tests := []struct { - name string - in []byte - want []HeaderField - wantDynTab []HeaderField // newest entry first - }{ - // C.2.1 Literal Header Field with Indexing - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 - {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), - []HeaderField{pair("custom-key", "custom-header")}, - []HeaderField{pair("custom-key", "custom-header")}, - }, - - // C.2.2 Literal Header Field without Indexing - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 - {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), - []HeaderField{pair(":path", "/sample/path")}, - []HeaderField{}}, - - // C.2.3 Literal Header Field never Indexed - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 - {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), - []HeaderField{{"password", "secret", true}}, - []HeaderField{}}, - - // C.2.4 Indexed Header Field - // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 - {"C.2.4", []byte("\x82"), - []HeaderField{pair(":method", "GET")}, - []HeaderField{}}, - } - for _, tt := range tests { - d := NewDecoder(4096, nil) - hf, err := d.DecodeFull(tt.in) - if err != nil { - t.Errorf("%s: %v", tt.name, err) - continue - } - if !reflect.DeepEqual(hf, tt.want) { - t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) - } - gotDynTab := d.dynTab.reverseCopy() - if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { - t.Errorf("%s: dynamic table after = %v; want %v", 
tt.name, gotDynTab, tt.wantDynTab) - } - } -} - -func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { - hf = make([]HeaderField, len(dt.table.ents)) - for i := range hf { - hf[i] = dt.table.ents[len(dt.table.ents)-1-i] - } - return -} - -type encAndWant struct { - enc []byte - want []HeaderField - wantDynTab []HeaderField - wantDynSize uint32 -} - -// C.3 Request Examples without Huffman Coding -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 -func TestDecodeC3_NoHuffman(t *testing.T) { - testDecodeSeries(t, 4096, []encAndWant{ - {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - }, - []HeaderField{ - pair(":authority", "www.example.com"), - }, - 57, - }, - {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - pair("cache-control", "no-cache"), - }, - []HeaderField{ - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 110, - }, - {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "https"), - pair(":path", "/index.html"), - pair(":authority", "www.example.com"), - pair("custom-key", "custom-value"), - }, - []HeaderField{ - pair("custom-key", "custom-value"), - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 164, - }, - }) -} - -// C.4 Request Examples with Huffman Coding -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 -func TestDecodeC4_Huffman(t *testing.T) { - testDecodeSeries(t, 4096, []encAndWant{ - {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - }, - []HeaderField{ - pair(":authority", "www.example.com"), - }, - 57, - }, - {dehex("8286 84be 5886 a8eb 1064 9cbf"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "http"), - pair(":path", "/"), - pair(":authority", "www.example.com"), - pair("cache-control", "no-cache"), - }, - []HeaderField{ - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 110, - }, - {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), - []HeaderField{ - pair(":method", "GET"), - pair(":scheme", "https"), - pair(":path", "/index.html"), - pair(":authority", "www.example.com"), - pair("custom-key", "custom-value"), - }, - []HeaderField{ - pair("custom-key", "custom-value"), - pair("cache-control", "no-cache"), - pair(":authority", "www.example.com"), - }, - 164, - }, - }) -} - -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 -// "This section shows several consecutive header lists, corresponding -// to HTTP responses, on the same connection. The HTTP/2 setting -// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 -// octets, causing some evictions to occur." 
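The table-driven cases above push the RFC 7541 appendix C blocks through DecodeFull, which gathers the emitted fields into a slice. The same C.2.1 block can be decoded with the public API directly; a brief sketch (import path assumed to resolve through go.mod rather than vendor/). TestDecodeC5 below then replays the appendix C.5 responses against a 256-byte table.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// RFC 7541 C.2.1: a literal header field with incremental indexing.
	raw, err := hex.DecodeString(strings.Replace(
		"400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572", " ", "", -1))
	if err != nil {
		panic(err)
	}

	dec := hpack.NewDecoder(4096, nil) // DecodeFull installs its own emit func
	fields, err := dec.DecodeFull(raw)
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Printf("%s: %s\n", f.Name, f.Value) // custom-key: custom-header
	}
}
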
-func TestDecodeC5_ResponsesNoHuff(t *testing.T) { - testDecodeSeries(t, 256, []encAndWant{ - {dehex(` -4803 3330 3258 0770 7269 7661 7465 611d -4d6f 6e2c 2032 3120 4f63 7420 3230 3133 -2032 303a 3133 3a32 3120 474d 546e 1768 -7474 7073 3a2f 2f77 7777 2e65 7861 6d70 -6c65 2e63 6f6d -`), - []HeaderField{ - pair(":status", "302"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - pair(":status", "302"), - }, - 222, - }, - {dehex("4803 3330 37c1 c0bf"), - []HeaderField{ - pair(":status", "307"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair(":status", "307"), - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - }, - 222, - }, - {dehex(` -88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 -3230 3133 2032 303a 3133 3a32 3220 474d -54c0 5a04 677a 6970 7738 666f 6f3d 4153 -444a 4b48 514b 425a 584f 5157 454f 5049 -5541 5851 5745 4f49 553b 206d 6178 2d61 -6765 3d33 3630 303b 2076 6572 7369 6f6e -3d31 -`), - []HeaderField{ - pair(":status", "200"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - pair("location", "https://www.example.com"), - pair("content-encoding", "gzip"), - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - }, - []HeaderField{ - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - pair("content-encoding", "gzip"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - }, - 215, - }, - }) -} - -// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 -// "This section shows the same examples as the previous section, but -// using Huffman encoding for the literal values. The HTTP/2 setting -// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 -// octets, causing some evictions to occur. The eviction mechanism -// uses the length of the decoded literal values, so the same -// evictions occurs as in the previous section." 
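The Huffman-coded variants above lean on AppendHuffmanString and HuffmanDecodeToString from the huffman.go deleted further down in this patch. A short round trip through those exported helpers, again assuming the module form of the import path, is sketched here; TestDecodeC6 below then repeats the C.5 responses in Huffman-coded form.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	const s = "www.example.com"

	// Encode with the static HPACK Huffman code; RFC 7541 C.4.1 lists the
	// expected bytes f1e3c2e5f23a6ba0ab90f4ff (12 bytes instead of 15).
	enc := hpack.AppendHuffmanString(nil, s)
	fmt.Printf("%x (%d -> %d bytes)\n", enc, len(s), len(enc))

	// HuffmanEncodeLength predicts that size without encoding.
	fmt.Println(hpack.HuffmanEncodeLength(s)) // 12

	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(dec) // www.example.com
}
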
-func TestDecodeC6_ResponsesHuffman(t *testing.T) { - testDecodeSeries(t, 256, []encAndWant{ - {dehex(` -4882 6402 5885 aec3 771a 4b61 96d0 7abe -9410 54d4 44a8 2005 9504 0b81 66e0 82a6 -2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 -e9ae 82ae 43d3 -`), - []HeaderField{ - pair(":status", "302"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - pair(":status", "302"), - }, - 222, - }, - {dehex("4883 640e ffc1 c0bf"), - []HeaderField{ - pair(":status", "307"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("location", "https://www.example.com"), - }, - []HeaderField{ - pair(":status", "307"), - pair("location", "https://www.example.com"), - pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), - pair("cache-control", "private"), - }, - 222, - }, - {dehex(` -88c1 6196 d07a be94 1054 d444 a820 0595 -040b 8166 e084 a62d 1bff c05a 839b d9ab -77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b -3960 d5af 2708 7f36 72c1 ab27 0fb5 291f -9587 3160 65c0 03ed 4ee5 b106 3d50 07 -`), - []HeaderField{ - pair(":status", "200"), - pair("cache-control", "private"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - pair("location", "https://www.example.com"), - pair("content-encoding", "gzip"), - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - }, - []HeaderField{ - pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), - pair("content-encoding", "gzip"), - pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), - }, - 215, - }, - }) -} - -func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { - d := NewDecoder(size, nil) - for i, step := range steps { - hf, err := d.DecodeFull(step.enc) - if err != nil { - t.Fatalf("Error at step index %d: %v", i, err) - } - if !reflect.DeepEqual(hf, step.want) { - t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) - } - gotDynTab := d.dynTab.reverseCopy() - if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { - t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) - } - if d.dynTab.size != step.wantDynSize { - t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) - } - } -} - -func TestHuffmanDecodeExcessPadding(t *testing.T) { - tests := [][]byte{ - {0xff}, // Padding Exceeds 7 bits - {0x1f, 0xff}, // {"a", 1 byte excess padding} - {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding} - {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding} - {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding} - {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol. 
- } - for i, in := range tests { - var buf bytes.Buffer - if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { - t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err) - } - } -} - -func TestHuffmanDecodeEOS(t *testing.T) { - in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"} - var buf bytes.Buffer - if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { - t.Errorf("error = %v; want ErrInvalidHuffman", err) - } -} - -func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) { - in := []byte{0x00, 0x01} // {"0", "0", "0"} - var buf bytes.Buffer - if err := huffmanDecode(&buf, 2, in); err != ErrStringLength { - t.Errorf("error = %v; want ErrStringLength", err) - } -} - -func TestHuffmanDecodeCorruptPadding(t *testing.T) { - in := []byte{0x00} - var buf bytes.Buffer - if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { - t.Errorf("error = %v; want ErrInvalidHuffman", err) - } -} - -func TestHuffmanDecode(t *testing.T) { - tests := []struct { - inHex, want string - }{ - {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"}, - {"a8eb 1064 9cbf", "no-cache"}, - {"25a8 49e9 5ba9 7d7f", "custom-key"}, - {"25a8 49e9 5bb8 e8b4 bf", "custom-value"}, - {"6402", "302"}, - {"aec3 771a 4b", "private"}, - {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"}, - {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"}, - {"9bd9 ab", "gzip"}, - {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07", - "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"}, - } - for i, tt := range tests { - var buf bytes.Buffer - in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1)) - if err != nil { - t.Errorf("%d. hex input error: %v", i, err) - continue - } - if _, err := HuffmanDecode(&buf, in); err != nil { - t.Errorf("%d. decode error: %v", i, err) - continue - } - if got := buf.String(); tt.want != got { - t.Errorf("%d. decode = %q; want %q", i, got, tt.want) - } - } -} - -func TestAppendHuffmanString(t *testing.T) { - tests := []struct { - in, want string - }{ - {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, - {"no-cache", "a8eb 1064 9cbf"}, - {"custom-key", "25a8 49e9 5ba9 7d7f"}, - {"custom-value", "25a8 49e9 5bb8 e8b4 bf"}, - {"302", "6402"}, - {"private", "aec3 771a 4b"}, - {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"}, - {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"}, - {"gzip", "9bd9 ab"}, - {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", - "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"}, - } - for i, tt := range tests { - buf := []byte{} - want := strings.Replace(tt.want, " ", "", -1) - buf = AppendHuffmanString(buf, tt.in) - if got := hex.EncodeToString(buf); want != got { - t.Errorf("%d. 
encode = %q; want %q", i, got, want) - } - } -} - -func TestHuffmanMaxStrLen(t *testing.T) { - const msg = "Some string" - huff := AppendHuffmanString(nil, msg) - - testGood := func(max int) { - var out bytes.Buffer - if err := huffmanDecode(&out, max, huff); err != nil { - t.Errorf("For maxLen=%d, unexpected error: %v", max, err) - } - if out.String() != msg { - t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg) - } - } - testGood(0) - testGood(len(msg)) - testGood(len(msg) + 1) - - var out bytes.Buffer - if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength { - t.Errorf("err = %v; want ErrStringLength", err) - } -} - -func TestHuffmanRoundtripStress(t *testing.T) { - const Len = 50 // of uncompressed string - input := make([]byte, Len) - var output bytes.Buffer - var huff []byte - - n := 5000 - if testing.Short() { - n = 100 - } - seed := time.Now().UnixNano() - t.Logf("Seed = %v", seed) - src := rand.New(rand.NewSource(seed)) - var encSize int64 - for i := 0; i < n; i++ { - for l := range input { - input[l] = byte(src.Intn(256)) - } - huff = AppendHuffmanString(huff[:0], string(input)) - encSize += int64(len(huff)) - output.Reset() - if err := huffmanDecode(&output, 0, huff); err != nil { - t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err) - continue - } - if !bytes.Equal(output.Bytes(), input) { - t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes()) - } - } - t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize) -} - -func TestHuffmanDecodeFuzz(t *testing.T) { - const Len = 50 // of compressed - var buf, zbuf bytes.Buffer - - n := 5000 - if testing.Short() { - n = 100 - } - seed := time.Now().UnixNano() - t.Logf("Seed = %v", seed) - src := rand.New(rand.NewSource(seed)) - numFail := 0 - for i := 0; i < n; i++ { - zbuf.Reset() - if i == 0 { - // Start with at least one invalid one. 
- zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8") - } else { - for l := 0; l < Len; l++ { - zbuf.WriteByte(byte(src.Intn(256))) - } - } - - buf.Reset() - if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil { - if err == ErrInvalidHuffman { - numFail++ - continue - } - t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err) - continue - } - } - t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n) - if numFail < 1 { - t.Error("expected at least one invalid huffman encoding (test starts with one)") - } -} - -func TestReadVarInt(t *testing.T) { - type res struct { - i uint64 - consumed int - err error - } - tests := []struct { - n byte - p []byte - want res - }{ - // Fits in a byte: - {1, []byte{0}, res{0, 1, nil}}, - {2, []byte{2}, res{2, 1, nil}}, - {3, []byte{6}, res{6, 1, nil}}, - {4, []byte{14}, res{14, 1, nil}}, - {5, []byte{30}, res{30, 1, nil}}, - {6, []byte{62}, res{62, 1, nil}}, - {7, []byte{126}, res{126, 1, nil}}, - {8, []byte{254}, res{254, 1, nil}}, - - // Doesn't fit in a byte: - {1, []byte{1}, res{0, 0, errNeedMore}}, - {2, []byte{3}, res{0, 0, errNeedMore}}, - {3, []byte{7}, res{0, 0, errNeedMore}}, - {4, []byte{15}, res{0, 0, errNeedMore}}, - {5, []byte{31}, res{0, 0, errNeedMore}}, - {6, []byte{63}, res{0, 0, errNeedMore}}, - {7, []byte{127}, res{0, 0, errNeedMore}}, - {8, []byte{255}, res{0, 0, errNeedMore}}, - - // Ignoring top bits: - {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 - {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 - {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 - - // Extra byte: - {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte - - // Short a byte: - {5, []byte{191, 154}, res{0, 0, errNeedMore}}, - - // integer overflow: - {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, - } - for _, tt := range tests { - i, remain, err := readVarInt(tt.n, tt.p) - consumed := len(tt.p) - len(remain) - got := res{i, consumed, err} - if got != tt.want { - t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) - } - } -} - -// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56 -func TestHuffmanFuzzCrash(t *testing.T) { - got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8")) - if got != "" { - t.Errorf("Got %q; want empty string", got) - } - if err != ErrInvalidHuffman { - t.Errorf("Err = %v; want ErrInvalidHuffman", err) - } -} - -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} -} - -func dehex(s string) []byte { - s = strings.Replace(s, " ", "", -1) - s = strings.Replace(s, "\n", "", -1) - b, err := hex.DecodeString(s) - if err != nil { - panic(err) - } - return b -} - -func TestEmitEnabled(t *testing.T) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) - enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) - - numCallback := 0 - var dec *Decoder - dec = NewDecoder(8<<20, func(HeaderField) { - numCallback++ - dec.SetEmitEnabled(false) - }) - if !dec.EmitEnabled() { - t.Errorf("initial emit enabled = false; want true") - } - if _, err := dec.Write(buf.Bytes()); err != nil { - t.Error(err) - } - if numCallback != 1 { - t.Errorf("num callbacks = %d; want 1", numCallback) - } - if dec.EmitEnabled() { - t.Errorf("emit enabled = true; want false") - } -} - -func TestSaveBufLimit(t *testing.T) { 
- const maxStr = 1 << 10 - var got []HeaderField - dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) { - got = append(got, hf) - }) - dec.SetMaxStringLength(maxStr) - var frag []byte - frag = append(frag[:0], encodeTypeByte(false, false)) - frag = appendVarInt(frag, 7, 3) - frag = append(frag, "foo"...) - frag = appendVarInt(frag, 7, 3) - frag = append(frag, "bar"...) - - if _, err := dec.Write(frag); err != nil { - t.Fatal(err) - } - - want := []HeaderField{{Name: "foo", Value: "bar"}} - if !reflect.DeepEqual(got, want) { - t.Errorf("After small writes, got %v; want %v", got, want) - } - - frag = append(frag[:0], encodeTypeByte(false, false)) - frag = appendVarInt(frag, 7, maxStr*3) - frag = append(frag, make([]byte, maxStr*3)...) - - _, err := dec.Write(frag) - if err != ErrStringLength { - t.Fatalf("Write error = %v; want ErrStringLength", err) - } -} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go deleted file mode 100644 index 8850e394..00000000 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "bytes" - "errors" - "io" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { return new(bytes.Buffer) }, -} - -// HuffmanDecode decodes the string in v and writes the expanded -// result to w, returning the number of bytes written to w and the -// Write call's return value. At most one Write call is made. -func HuffmanDecode(w io.Writer, v []byte) (int, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return 0, err - } - return w.Write(buf.Bytes()) -} - -// HuffmanDecodeToString decodes the string in v. -func HuffmanDecodeToString(v []byte) (string, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return "", err - } - return buf.String(), nil -} - -// ErrInvalidHuffman is returned for errors found decoding -// Huffman-encoded strings. -var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") - -// huffmanDecode decodes v to buf. -// If maxLen is greater than 0, attempts to write more to buf than -// maxLen bytes will return ErrStringLength. -func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { - n := rootHuffmanNode - // cur is the bit buffer that has not been fed into n. - // cbits is the number of low order bits in cur that are valid. - // sbits is the number of bits of the symbol prefix being decoded. 
-	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
-	for _, b := range v {
-		cur = cur<<8 | uint(b)
-		cbits += 8
-		sbits += 8
-		for cbits >= 8 {
-			idx := byte(cur >> (cbits - 8))
-			n = n.children[idx]
-			if n == nil {
-				return ErrInvalidHuffman
-			}
-			if n.children == nil {
-				if maxLen != 0 && buf.Len() == maxLen {
-					return ErrStringLength
-				}
-				buf.WriteByte(n.sym)
-				cbits -= n.codeLen
-				n = rootHuffmanNode
-				sbits = cbits
-			} else {
-				cbits -= 8
-			}
-		}
-	}
-	for cbits > 0 {
-		n = n.children[byte(cur<<(8-cbits))]
-		if n == nil {
-			return ErrInvalidHuffman
-		}
-		if n.children != nil || n.codeLen > cbits {
-			break
-		}
-		if maxLen != 0 && buf.Len() == maxLen {
-			return ErrStringLength
-		}
-		buf.WriteByte(n.sym)
-		cbits -= n.codeLen
-		n = rootHuffmanNode
-		sbits = cbits
-	}
-	if sbits > 7 {
-		// Either there was an incomplete symbol, or overlong padding.
-		// Both are decoding errors per RFC 7541 section 5.2.
-		return ErrInvalidHuffman
-	}
-	if mask := uint(1<<cbits - 1); cur&mask != mask {
-		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
-		return ErrInvalidHuffman
-	}
-	return nil
-}
-
-type node struct {
-	// children is non-nil for internal nodes
-	children []*node
-
-	// The following are only valid if children is nil:
-	codeLen uint8 // length of the code in bits
-	sym     byte  // the symbol
-}
-
-func newInternalNode() *node {
-	return &node{children: make([]*node, 256)}
-}
-
-var rootHuffmanNode = newInternalNode()
-
-func init() {
-	if len(huffmanCodes) != 256 {
-		panic("unexpected size")
-	}
-	for i, code := range huffmanCodes {
-		addDecoderNode(byte(i), code, huffmanCodeLen[i])
-	}
-}
-
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
-	cur := rootHuffmanNode
-	for codeLen > 8 {
-		codeLen -= 8
-		i := uint8(code >> codeLen)
-		if cur.children[i] == nil {
-			cur.children[i] = newInternalNode()
-		}
-		cur = cur.children[i]
-	}
-	shift := 8 - codeLen
-	start, end := int(uint8(code<<shift)), int(1<<shift)
-	for i := start; i < start+end; i++ {
-		cur.children[i] = &node{sym: sym, codeLen: codeLen}
-	}
-}
-
-// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
-// and returns the extended buffer.
-func AppendHuffmanString(dst []byte, s string) []byte {
-	rembits := uint8(8)
-
-	for i := 0; i < len(s); i++ {
-		if rembits == 8 {
-			dst = append(dst, 0)
-		}
-		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
-	}
-
-	if rembits < 8 {
-		// special EOS symbol
-		code := uint32(0x3fffffff)
-		nbits := uint8(30)
-
-		t := uint8(code >> (nbits - rembits))
-		dst[len(dst)-1] |= t
-	}
-
-	return dst
-}
-
-// HuffmanEncodeLength returns the number of bytes required to encode
-// s in Huffman codes. The result is round up to byte boundary.
-func HuffmanEncodeLength(s string) uint64 {
-	n := uint64(0)
-	for i := 0; i < len(s); i++ {
-		n += uint64(huffmanCodeLen[s[i]])
-	}
-	return (n + 7) / 8
-}
-
-// appendByteToHuffmanCode appends Huffman code for c to dst and
-// returns the extended buffer and the remaining bits in the last
-// element. The appending is not byte aligned and the remaining bits
-// in the last element of dst is given in rembits.
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
-	code := huffmanCodes[c]
-	nbits := huffmanCodeLen[c]
-
-	for {
-		if rembits > nbits {
-			t := uint8(code << (rembits - nbits))
-			dst[len(dst)-1] |= t
-			rembits -= nbits
-			break
-		}
-
-		t := uint8(code >> (nbits - rembits))
-		dst[len(dst)-1] |= t
-
-		nbits -= rembits
-		rembits = 8
-
-		if nbits == 0 {
-			break
-		}
-
-		dst = append(dst, 0)
-	}
-
-	return dst, rembits
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
deleted file mode 100644
index a66cfbea..00000000
--- a/vendor/golang.org/x/net/http2/hpack/tables.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
-	"fmt"
-)
-
-// headerFieldTable implements a list of HeaderFields.
-// This is used to implement the static and dynamic tables.
-type headerFieldTable struct {
-	// For static tables, entries are never evicted.
-	//
-	// For dynamic tables, entries are evicted from ents[0] and added to the end.
-	// Each entry has a unique id that starts at one and increments for each
-	// entry that is added. This unique id is stable across evictions, meaning
-	// it can be used as a pointer to a specific entry. As in hpack, unique ids
-	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
-	//
-	// Zero is not a valid unique id.
-	//
-	// evictCount should not overflow in any remotely practical situation. In
-	// practice, we will have one dynamic table per HTTP/2 connection.
If we - // assume a very powerful server that handles 1M QPS per connection and each - // request adds (then evicts) 100 entries from the table, it would still take - // 2M years for evictCount to overflow. - ents []HeaderField - evictCount uint64 - - // byName maps a HeaderField name to the unique id of the newest entry with - // the same name. See above for a definition of "unique id". - byName map[string]uint64 - - // byNameValue maps a HeaderField name/value pair to the unique id of the newest - // entry with the same name and value. See above for a definition of "unique id". - byNameValue map[pairNameValue]uint64 -} - -type pairNameValue struct { - name, value string -} - -func (t *headerFieldTable) init() { - t.byName = make(map[string]uint64) - t.byNameValue = make(map[pairNameValue]uint64) -} - -// len reports the number of entries in the table. -func (t *headerFieldTable) len() int { - return len(t.ents) -} - -// addEntry adds a new entry. -func (t *headerFieldTable) addEntry(f HeaderField) { - id := uint64(t.len()) + t.evictCount + 1 - t.byName[f.Name] = id - t.byNameValue[pairNameValue{f.Name, f.Value}] = id - t.ents = append(t.ents, f) -} - -// evictOldest evicts the n oldest entries in the table. -func (t *headerFieldTable) evictOldest(n int) { - if n > t.len() { - panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) - } - for k := 0; k < n; k++ { - f := t.ents[k] - id := t.evictCount + uint64(k) + 1 - if t.byName[f.Name] == id { - delete(t.byName, f.Name) - } - if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { - delete(t.byNameValue, p) - } - } - copy(t.ents, t.ents[n:]) - for k := t.len() - n; k < t.len(); k++ { - t.ents[k] = HeaderField{} // so strings can be garbage collected - } - t.ents = t.ents[:t.len()-n] - if t.evictCount+uint64(n) < t.evictCount { - panic("evictCount overflow") - } - t.evictCount += uint64(n) -} - -// search finds f in the table. If there is no match, i is 0. -// If both name and value match, i is the matched index and nameValueMatch -// becomes true. If only name matches, i points to that index and -// nameValueMatch becomes false. -// -// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says -// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, -// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic -// table, the return value i actually refers to the entry t.ents[t.len()-i]. -// -// All tables are assumed to be a dynamic tables except for the global -// staticTable pointer. -// -// See Section 2.3.3. -func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - if !f.Sensitive { - if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { - return t.idToIndex(id), true - } - } - if id := t.byName[f.Name]; id != 0 { - return t.idToIndex(id), false - } - return 0, false -} - -// idToIndex converts a unique id to an HPACK index. -// See Section 2.3.3. 
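The unique-id bookkeeping documented above, together with the idToIndex conversion defined just below, is easy to get backwards: ids grow from 1 as entries are added and survive evictions, while the HPACK-style index counts from the newest dynamic entry. Restated as a standalone sketch (dynIndex is an illustrative name; the real headerFieldTable is unexported):

package main

import "fmt"

// dynIndex mirrors the dynamic-table branch of idToIndex: k is the entry's
// position in the oldest-first ents slice, and the returned index counts
// from the newest entry (index 1).
func dynIndex(id, evictCount uint64, tableLen int) uint64 {
	k := id - evictCount - 1
	return uint64(tableLen) - k
}

func main() {
	// Six insertions (ids 1..6) followed by three evictions leave ids 4..6
	// in the table; the newest entry, id 6, maps to index 1.
	for id := uint64(4); id <= 6; id++ {
		fmt.Printf("id %d -> dynamic index %d\n", id, dynIndex(id, 3, 3))
	}
}
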
-func (t *headerFieldTable) idToIndex(id uint64) uint64 { - if id <= t.evictCount { - panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) - } - k := id - t.evictCount - 1 // convert id to an index t.ents[k] - if t != staticTable { - return uint64(t.len()) - k // dynamic table - } - return k + 1 -} - -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = newStaticTable() -var staticTableEntries = [...]HeaderField{ - {Name: ":authority"}, - {Name: ":method", Value: "GET"}, - {Name: ":method", Value: "POST"}, - {Name: ":path", Value: "/"}, - {Name: ":path", Value: "/index.html"}, - {Name: ":scheme", Value: "http"}, - {Name: ":scheme", Value: "https"}, - {Name: ":status", Value: "200"}, - {Name: ":status", Value: "204"}, - {Name: ":status", Value: "206"}, - {Name: ":status", Value: "304"}, - {Name: ":status", Value: "400"}, - {Name: ":status", Value: "404"}, - {Name: ":status", Value: "500"}, - {Name: "accept-charset"}, - {Name: "accept-encoding", Value: "gzip, deflate"}, - {Name: "accept-language"}, - {Name: "accept-ranges"}, - {Name: "accept"}, - {Name: "access-control-allow-origin"}, - {Name: "age"}, - {Name: "allow"}, - {Name: "authorization"}, - {Name: "cache-control"}, - {Name: "content-disposition"}, - {Name: "content-encoding"}, - {Name: "content-language"}, - {Name: "content-length"}, - {Name: "content-location"}, - {Name: "content-range"}, - {Name: "content-type"}, - {Name: "cookie"}, - {Name: "date"}, - {Name: "etag"}, - {Name: "expect"}, - {Name: "expires"}, - {Name: "from"}, - {Name: "host"}, - {Name: "if-match"}, - {Name: "if-modified-since"}, - {Name: "if-none-match"}, - {Name: "if-range"}, - {Name: "if-unmodified-since"}, - {Name: "last-modified"}, - {Name: "link"}, - {Name: "location"}, - {Name: "max-forwards"}, - {Name: "proxy-authenticate"}, - {Name: "proxy-authorization"}, - {Name: "range"}, - {Name: "referer"}, - {Name: "refresh"}, - {Name: "retry-after"}, - {Name: "server"}, - {Name: "set-cookie"}, - {Name: "strict-transport-security"}, - {Name: "transfer-encoding"}, - {Name: "user-agent"}, - {Name: "vary"}, - {Name: "via"}, - {Name: "www-authenticate"}, -} - -func newStaticTable() *headerFieldTable { - t := &headerFieldTable{} - t.init() - for _, e := range staticTableEntries[:] { - t.addEntry(e) - } - return t -} - -var huffmanCodes = [256]uint32{ - 0x1ff8, - 0x7fffd8, - 0xfffffe2, - 0xfffffe3, - 0xfffffe4, - 0xfffffe5, - 0xfffffe6, - 0xfffffe7, - 0xfffffe8, - 0xffffea, - 0x3ffffffc, - 0xfffffe9, - 0xfffffea, - 0x3ffffffd, - 0xfffffeb, - 0xfffffec, - 0xfffffed, - 0xfffffee, - 0xfffffef, - 0xffffff0, - 0xffffff1, - 0xffffff2, - 0x3ffffffe, - 0xffffff3, - 0xffffff4, - 0xffffff5, - 0xffffff6, - 0xffffff7, - 0xffffff8, - 0xffffff9, - 0xffffffa, - 0xffffffb, - 0x14, - 0x3f8, - 0x3f9, - 0xffa, - 0x1ff9, - 0x15, - 0xf8, - 0x7fa, - 0x3fa, - 0x3fb, - 0xf9, - 0x7fb, - 0xfa, - 0x16, - 0x17, - 0x18, - 0x0, - 0x1, - 0x2, - 0x19, - 0x1a, - 0x1b, - 0x1c, - 0x1d, - 0x1e, - 0x1f, - 0x5c, - 0xfb, - 0x7ffc, - 0x20, - 0xffb, - 0x3fc, - 0x1ffa, - 0x21, - 0x5d, - 0x5e, - 0x5f, - 0x60, - 0x61, - 0x62, - 0x63, - 0x64, - 0x65, - 0x66, - 0x67, - 0x68, - 0x69, - 0x6a, - 0x6b, - 0x6c, - 0x6d, - 0x6e, - 0x6f, - 0x70, - 0x71, - 0x72, - 0xfc, - 0x73, - 0xfd, - 0x1ffb, - 0x7fff0, - 0x1ffc, - 0x3ffc, - 0x22, - 0x7ffd, - 0x3, - 0x23, - 0x4, - 0x24, - 0x5, - 0x25, - 0x26, - 0x27, - 0x6, - 0x74, - 0x75, - 0x28, - 0x29, - 0x2a, - 0x7, - 0x2b, - 0x76, - 0x2c, - 0x8, - 0x9, - 0x2d, - 0x77, - 0x78, - 0x79, - 0x7a, - 0x7b, - 0x7ffe, - 0x7fc, - 
0x3ffd, - 0x1ffd, - 0xffffffc, - 0xfffe6, - 0x3fffd2, - 0xfffe7, - 0xfffe8, - 0x3fffd3, - 0x3fffd4, - 0x3fffd5, - 0x7fffd9, - 0x3fffd6, - 0x7fffda, - 0x7fffdb, - 0x7fffdc, - 0x7fffdd, - 0x7fffde, - 0xffffeb, - 0x7fffdf, - 0xffffec, - 0xffffed, - 0x3fffd7, - 0x7fffe0, - 0xffffee, - 0x7fffe1, - 0x7fffe2, - 0x7fffe3, - 0x7fffe4, - 0x1fffdc, - 0x3fffd8, - 0x7fffe5, - 0x3fffd9, - 0x7fffe6, - 0x7fffe7, - 0xffffef, - 0x3fffda, - 0x1fffdd, - 0xfffe9, - 0x3fffdb, - 0x3fffdc, - 0x7fffe8, - 0x7fffe9, - 0x1fffde, - 0x7fffea, - 0x3fffdd, - 0x3fffde, - 0xfffff0, - 0x1fffdf, - 0x3fffdf, - 0x7fffeb, - 0x7fffec, - 0x1fffe0, - 0x1fffe1, - 0x3fffe0, - 0x1fffe2, - 0x7fffed, - 0x3fffe1, - 0x7fffee, - 0x7fffef, - 0xfffea, - 0x3fffe2, - 0x3fffe3, - 0x3fffe4, - 0x7ffff0, - 0x3fffe5, - 0x3fffe6, - 0x7ffff1, - 0x3ffffe0, - 0x3ffffe1, - 0xfffeb, - 0x7fff1, - 0x3fffe7, - 0x7ffff2, - 0x3fffe8, - 0x1ffffec, - 0x3ffffe2, - 0x3ffffe3, - 0x3ffffe4, - 0x7ffffde, - 0x7ffffdf, - 0x3ffffe5, - 0xfffff1, - 0x1ffffed, - 0x7fff2, - 0x1fffe3, - 0x3ffffe6, - 0x7ffffe0, - 0x7ffffe1, - 0x3ffffe7, - 0x7ffffe2, - 0xfffff2, - 0x1fffe4, - 0x1fffe5, - 0x3ffffe8, - 0x3ffffe9, - 0xffffffd, - 0x7ffffe3, - 0x7ffffe4, - 0x7ffffe5, - 0xfffec, - 0xfffff3, - 0xfffed, - 0x1fffe6, - 0x3fffe9, - 0x1fffe7, - 0x1fffe8, - 0x7ffff3, - 0x3fffea, - 0x3fffeb, - 0x1ffffee, - 0x1ffffef, - 0xfffff4, - 0xfffff5, - 0x3ffffea, - 0x7ffff4, - 0x3ffffeb, - 0x7ffffe6, - 0x3ffffec, - 0x3ffffed, - 0x7ffffe7, - 0x7ffffe8, - 0x7ffffe9, - 0x7ffffea, - 0x7ffffeb, - 0xffffffe, - 0x7ffffec, - 0x7ffffed, - 0x7ffffee, - 0x7ffffef, - 0x7fffff0, - 0x3ffffee, -} - -var huffmanCodeLen = [256]uint8{ - 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, - 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, - 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, - 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, - 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, - 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, - 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, - 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, - 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, - 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, - 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, - 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, - 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, - 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, -} diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go deleted file mode 100644 index d963f363..00000000 --- a/vendor/golang.org/x/net/http2/hpack/tables_test.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package hpack - -import ( - "bufio" - "regexp" - "strconv" - "strings" - "testing" -) - -func TestHeaderFieldTable(t *testing.T) { - table := &headerFieldTable{} - table.init() - table.addEntry(pair("key1", "value1-1")) - table.addEntry(pair("key2", "value2-1")) - table.addEntry(pair("key1", "value1-2")) - table.addEntry(pair("key3", "value3-1")) - table.addEntry(pair("key4", "value4-1")) - table.addEntry(pair("key2", "value2-2")) - - // Tests will be run twice: once before evicting anything, and - // again after evicting the three oldest entries. - tests := []struct { - f HeaderField - beforeWantStaticI uint64 - beforeWantMatch bool - afterWantStaticI uint64 - afterWantMatch bool - }{ - {HeaderField{"key1", "value1-1", false}, 1, true, 0, false}, - {HeaderField{"key1", "value1-2", false}, 3, true, 0, false}, - {HeaderField{"key1", "value1-3", false}, 3, false, 0, false}, - {HeaderField{"key2", "value2-1", false}, 2, true, 3, false}, - {HeaderField{"key2", "value2-2", false}, 6, true, 3, true}, - {HeaderField{"key2", "value2-3", false}, 6, false, 3, false}, - {HeaderField{"key4", "value4-1", false}, 5, true, 2, true}, - // Name match only, because sensitive. - {HeaderField{"key4", "value4-1", true}, 5, false, 2, false}, - // Key not found. - {HeaderField{"key5", "value5-x", false}, 0, false, 0, false}, - } - - staticToDynamic := func(i uint64) uint64 { - if i == 0 { - return 0 - } - return uint64(table.len()) - i + 1 // dynamic is the reversed table - } - - searchStatic := func(f HeaderField) (uint64, bool) { - old := staticTable - staticTable = table - defer func() { staticTable = old }() - return staticTable.search(f) - } - - searchDynamic := func(f HeaderField) (uint64, bool) { - return table.search(f) - } - - for _, test := range tests { - gotI, gotMatch := searchStatic(test.f) - if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { - t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) - } - gotI, gotMatch = searchDynamic(test.f) - wantDynamicI := staticToDynamic(test.beforeWantStaticI) - if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { - t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) - } - } - - table.evictOldest(3) - - for _, test := range tests { - gotI, gotMatch := searchStatic(test.f) - if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { - t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) - } - gotI, gotMatch = searchDynamic(test.f) - wantDynamicI := staticToDynamic(test.afterWantStaticI) - if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { - t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) - } - } -} - -func TestHeaderFieldTable_LookupMapEviction(t *testing.T) { - table := &headerFieldTable{} - table.init() - table.addEntry(pair("key1", "value1-1")) - table.addEntry(pair("key2", "value2-1")) - table.addEntry(pair("key1", "value1-2")) - table.addEntry(pair("key3", "value3-1")) - table.addEntry(pair("key4", "value4-1")) - table.addEntry(pair("key2", "value2-2")) - - // evict all pairs - table.evictOldest(table.len()) - - if l := table.len(); l > 0 { - t.Errorf("table.len() = %d, want 0", l) - } - - if l := len(table.byName); l > 0 { - 
t.Errorf("len(table.byName) = %d, want 0", l) - } - - if l := len(table.byNameValue); l > 0 { - t.Errorf("len(table.byNameValue) = %d, want 0", l) - } -} - -func TestStaticTable(t *testing.T) { - fromSpec := ` - +-------+-----------------------------+---------------+ - | 1 | :authority | | - | 2 | :method | GET | - | 3 | :method | POST | - | 4 | :path | / | - | 5 | :path | /index.html | - | 6 | :scheme | http | - | 7 | :scheme | https | - | 8 | :status | 200 | - | 9 | :status | 204 | - | 10 | :status | 206 | - | 11 | :status | 304 | - | 12 | :status | 400 | - | 13 | :status | 404 | - | 14 | :status | 500 | - | 15 | accept-charset | | - | 16 | accept-encoding | gzip, deflate | - | 17 | accept-language | | - | 18 | accept-ranges | | - | 19 | accept | | - | 20 | access-control-allow-origin | | - | 21 | age | | - | 22 | allow | | - | 23 | authorization | | - | 24 | cache-control | | - | 25 | content-disposition | | - | 26 | content-encoding | | - | 27 | content-language | | - | 28 | content-length | | - | 29 | content-location | | - | 30 | content-range | | - | 31 | content-type | | - | 32 | cookie | | - | 33 | date | | - | 34 | etag | | - | 35 | expect | | - | 36 | expires | | - | 37 | from | | - | 38 | host | | - | 39 | if-match | | - | 40 | if-modified-since | | - | 41 | if-none-match | | - | 42 | if-range | | - | 43 | if-unmodified-since | | - | 44 | last-modified | | - | 45 | link | | - | 46 | location | | - | 47 | max-forwards | | - | 48 | proxy-authenticate | | - | 49 | proxy-authorization | | - | 50 | range | | - | 51 | referer | | - | 52 | refresh | | - | 53 | retry-after | | - | 54 | server | | - | 55 | set-cookie | | - | 56 | strict-transport-security | | - | 57 | transfer-encoding | | - | 58 | user-agent | | - | 59 | vary | | - | 60 | via | | - | 61 | www-authenticate | | - +-------+-----------------------------+---------------+ -` - bs := bufio.NewScanner(strings.NewReader(fromSpec)) - re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) - for bs.Scan() { - l := bs.Text() - if !strings.Contains(l, "|") { - continue - } - m := re.FindStringSubmatch(l) - if m == nil { - continue - } - i, err := strconv.Atoi(m[1]) - if err != nil { - t.Errorf("Bogus integer on line %q", l) - continue - } - if i < 1 || i > staticTable.len() { - t.Errorf("Bogus index %d on line %q", i, l) - continue - } - if got, want := staticTable.ents[i-1].Name, m[2]; got != want { - t.Errorf("header index %d name = %q; want %q", i, got, want) - } - if got, want := staticTable.ents[i-1].Value, m[3]; got != want { - t.Errorf("header index %d value = %q; want %q", i, got, want) - } - } - if err := bs.Err(); err != nil { - t.Error(err) - } -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go deleted file mode 100644 index d565f40e..00000000 --- a/vendor/golang.org/x/net/http2/http2.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http2 implements the HTTP/2 protocol. -// -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) -// -// See https://http2.github.io/ for more information on HTTP/2. 
-// -// See https://http2.golang.org/ for a test server running this code. -// -package http2 // import "golang.org/x/net/http2" - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/net/lex/httplex" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. - ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -// HTTP/2 stream states. -// -// See http://tools.ietf.org/html/rfc7540#section-5.1. -// -// For simplicity, the server code merges "reserved (local)" into -// "half-closed (remote)". This is one less state transition to track. -// The only downside is that we send PUSH_PROMISEs slightly less -// liberally than allowable. More discussion here: -// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html -// -// "reserved (remote)" is omitted since the client code does not -// support server push. -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. 
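Setting and SettingID are exported, so the SETTINGS bounds enforced by the Valid method defined just below can be checked directly by callers. A small sketch, assuming the package is imported as golang.org/x/net/http2 per the import comment above:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	s := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 123}

	// String uses the IANA setting names: "[MAX_FRAME_SIZE = 123]".
	fmt.Println(s)

	// Valid rejects the value: SETTINGS_MAX_FRAME_SIZE must lie between
	// 16384 and 2^24-1, so this reports a protocol-level connection error.
	if err := s.Valid(); err != nil {
		fmt.Println("invalid setting:", err)
	}
}
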
-func (s Setting) Valid() error { - // Limits and error codes from 6.5.2 Defined SETTINGS Parameters - switch s.ID { - case SettingEnablePush: - if s.Val != 1 && s.Val != 0 { - return ConnectionError(ErrCodeProtocol) - } - case SettingInitialWindowSize: - if s.Val > 1<<31-1 { - return ConnectionError(ErrCodeFlowControl) - } - case SettingMaxFrameSize: - if s.Val < 16384 || s.Val > 1<<24-1 { - return ConnectionError(ErrCodeProtocol) - } - } - return nil -} - -// A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings -type SettingID uint16 - -const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 -) - -var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", -} - -func (s SettingID) String() string { - if v, ok := settingName[s]; ok { - return v - } - return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) -} - -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - -// validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. -// -// Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " -func validWireHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !httplex.IsTokenRune(r) { - return false - } - if 'A' <= r && r <= 'Z' { - return false - } - } - return true -} - -var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := http.StatusText(i); v != "" { - httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - -func httpCodeString(code int) string { - if s, ok := httpCodeStringCommon[code]; ok { - return s - } - return strconv.Itoa(code) -} - -// from pkg io -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - -// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). -type closeWaiter chan struct{} - -// Init makes a closeWaiter usable. -// It exists because so a closeWaiter value can be placed inside a -// larger struct and have the Mutex and Cond's memory in the same -// allocation. -func (cw *closeWaiter) Init() { - *cw = make(chan struct{}) -} - -// Close marks the closeWaiter as closed and unblocks any waiters. -func (cw closeWaiter) Close() { - close(cw) -} - -// Wait waits for the closeWaiter to become closed. -func (cw closeWaiter) Wait() { - <-cw -} - -// bufferedWriter is a buffered writer that writes to w. -// Its buffered writer is lazily allocated as needed, to minimize -// idle memory usage with many connections. 
-type bufferedWriter struct { - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered -} - -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} -} - -// bufWriterPoolBufferSize is the size of bufio.Writer's -// buffers created using bufWriterPool. -// -// TODO: pick a less arbitrary value? this is a bit under -// (3 x typical 1500 byte MTU) at least. Other than that, -// not much thought went into it. -const bufWriterPoolBufferSize = 4 << 10 - -var bufWriterPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) - }, -} - -func (w *bufferedWriter) Available() int { - if w.bw == nil { - return bufWriterPoolBufferSize - } - return w.bw.Available() -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - if w.bw == nil { - bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) - w.bw = bw - } - return w.bw.Write(p) -} - -func (w *bufferedWriter) Flush() error { - bw := w.bw - if bw == nil { - return nil - } - err := bw.Flush() - bw.Reset(nil) - bufWriterPool.Put(bw) - w.bw = nil - return err -} - -func mustUint31(v int32) uint32 { - if v < 0 || v > 2147483647 { - panic("out of range") - } - return uint32(v) -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 2616, section 4.4. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -type httpError struct { - msg string - timeout bool -} - -func (e *httpError) Error() string { return e.msg } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} - -type connectionStater interface { - ConnectionState() tls.ConnectionState -} - -var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} - -type sorter struct { - v []string // owned by sorter -} - -func (s *sorter) Len() int { return len(s.v) } -func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } -func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } - -// Keys returns the sorted keys of h. -// -// The returned slice is only valid until s used again or returned to -// its pool. -func (s *sorter) Keys(h http.Header) []string { - keys := s.v[:0] - for k := range h { - keys = append(keys, k) - } - s.v = keys - sort.Sort(s) - return keys -} - -func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owns, so - // stash it away while we sort the user's buffer. - save := s.v - s.v = ss - sort.Sort(s) - s.v = save -} - -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. 
-func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go deleted file mode 100644 index 52487764..00000000 --- a/vendor/golang.org/x/net/http2/http2_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "errors" - "flag" - "fmt" - "net/http" - "os/exec" - "strconv" - "strings" - "testing" - - "golang.org/x/net/http2/hpack" -) - -var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.") - -func condSkipFailingTest(t *testing.T) { - if !*knownFailing { - t.Skip("Skipping known-failing test without --known_failing") - } -} - -func init() { - inTests = true - DebugGoroutines = true - flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging") -} - -func TestSettingString(t *testing.T) { - tests := []struct { - s Setting - want string - }{ - {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"}, - {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"}, - } - for i, tt := range tests { - got := fmt.Sprint(tt.s) - if got != tt.want { - t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want) - } - } -} - -type twriter struct { - t testing.TB - st *serverTester // optional -} - -func (w twriter) Write(p []byte) (n int, err error) { - if w.st != nil { - ps := string(p) - for _, phrase := range w.st.logFilter { - if strings.Contains(ps, phrase) { - return len(p), nil // no logging - } - } - } - w.t.Logf("%s", p) - return len(p), nil -} - -// like encodeHeader, but don't add implicit pseudo headers. -func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - for len(headers) > 0 { - k, v := headers[0], headers[1] - headers = headers[2:] - if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil { - t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) - } - } - return buf.Bytes() -} - -// Verify that curl has http2. -func requireCurl(t *testing.T) { - out, err := dockerLogs(curl(t, "--version")) - if err != nil { - t.Skipf("failed to determine curl features; skipping test") - } - if !strings.Contains(string(out), "HTTP2") { - t.Skip("curl doesn't support HTTP2; skipping test") - } -} - -func curl(t *testing.T, args ...string) (container string) { - out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output() - if err != nil { - t.Skipf("Failed to run curl in docker: %v, %s", err, out) - } - return strings.TrimSpace(string(out)) -} - -// Verify that h2load exists. -func requireH2load(t *testing.T) { - out, err := dockerLogs(h2load(t, "--version")) - if err != nil { - t.Skipf("failed to probe h2load; skipping test: %s", out) - } - if !strings.Contains(string(out), "h2load nghttp2/") { - t.Skipf("h2load not present; skipping test. 
(Output=%q)", out) - } -} - -func h2load(t *testing.T, args ...string) (container string) { - out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output() - if err != nil { - t.Skipf("Failed to run h2load in docker: %v, %s", err, out) - } - return strings.TrimSpace(string(out)) -} - -type puppetCommand struct { - fn func(w http.ResponseWriter, r *http.Request) - done chan<- bool -} - -type handlerPuppet struct { - ch chan puppetCommand -} - -func newHandlerPuppet() *handlerPuppet { - return &handlerPuppet{ - ch: make(chan puppetCommand), - } -} - -func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { - for cmd := range p.ch { - cmd.fn(w, r) - cmd.done <- true - } -} - -func (p *handlerPuppet) done() { close(p.ch) } -func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { - done := make(chan bool) - p.ch <- puppetCommand{fn, done} - <-done -} -func dockerLogs(container string) ([]byte, error) { - out, err := exec.Command("docker", "wait", container).CombinedOutput() - if err != nil { - return out, err - } - exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) - if err != nil { - return out, errors.New("unexpected exit status from docker wait") - } - out, err = exec.Command("docker", "logs", container).CombinedOutput() - exec.Command("docker", "rm", container).Run() - if err == nil && exitStatus != 0 { - err = fmt.Errorf("exit status %d: %s", exitStatus, out) - } - return out, err -} - -func kill(container string) { - exec.Command("docker", "kill", container).Run() - exec.Command("docker", "rm", container).Run() -} - -func cleanDate(res *http.Response) { - if d := res.Header["Date"]; len(d) == 1 { - d[0] = "XXX" - } -} - -func TestSorterPoolAllocs(t *testing.T) { - ss := []string{"a", "b", "c"} - h := http.Header{ - "a": nil, - "b": nil, - "c": nil, - } - sorter := new(sorter) - - if allocs := testing.AllocsPerRun(100, func() { - sorter.SortStrings(ss) - }); allocs >= 1 { - t.Logf("SortStrings allocs = %v; want <1", allocs) - } - - if allocs := testing.AllocsPerRun(5, func() { - if len(sorter.Keys(h)) != 3 { - t.Fatal("wrong result") - } - }); allocs > 0 { - t.Logf("Keys allocs = %v; want <1", allocs) - } -} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index 508cebcc..00000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import ( - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 140434a7..00000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.7 - -package http2 - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -type contextContext interface { - Done() <-chan struct{} - Err() error -} - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. -} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} - -// temporary copy of Go 1.6's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} - -func (cc *ClientConn) Ping(ctx contextContext) error { - return cc.ping(ctx) -} - -func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go deleted file mode 100644 index 6f8d3f86..00000000 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package http2 - -import ( - "io" - "net/http" -) - -func configureServer18(h1 *http.Server, h2 *Server) error { - // No IdleTimeout to sync prior to Go 1.8. - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return nil -} - -func reqBodyIsNoBody(io.ReadCloser) bool { return false } - -func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go deleted file mode 100644 index 5ae07726..00000000 --- a/vendor/golang.org/x/net/http2/not_go19.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 - -package http2 - -import ( - "net/http" -) - -func configureServer19(s *http.Server, conf *Server) error { - // not supported prior to go1.9 - return nil -} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index a6140099..00000000 --- a/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer // nil when done reading - err error // read error once empty. non-nil means closed. - breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - if p.b == nil { - return 0 - } - return p.b.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b != nil && p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - p.b = nil - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - if p.breakErr != nil { - return len(d), nil // discard when there is no reader - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. -func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. 
- return - } - p.readFn = fn - if dst == &p.breakErr { - p.b = nil - } - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go deleted file mode 100644 index 1bf351ff..00000000 --- a/vendor/golang.org/x/net/http2/pipe_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "testing" -) - -func TestPipeClose(t *testing.T) { - var p pipe - p.b = new(bytes.Buffer) - a := errors.New("a") - b := errors.New("b") - p.CloseWithError(a) - p.CloseWithError(b) - _, err := p.Read(make([]byte, 1)) - if err != a { - t.Errorf("err = %v want %v", err, a) - } -} - -func TestPipeDoneChan(t *testing.T) { - var p pipe - done := p.Done() - select { - case <-done: - t.Fatal("done too soon") - default: - } - p.CloseWithError(io.EOF) - select { - case <-done: - default: - t.Fatal("should be done") - } -} - -func TestPipeDoneChan_ErrFirst(t *testing.T) { - var p pipe - p.CloseWithError(io.EOF) - done := p.Done() - select { - case <-done: - default: - t.Fatal("should be done") - } -} - -func TestPipeDoneChan_Break(t *testing.T) { - var p pipe - done := p.Done() - select { - case <-done: - t.Fatal("done too soon") - default: - } - p.BreakWithError(io.EOF) - select { - case <-done: - default: - t.Fatal("should be done") - } -} - -func TestPipeDoneChan_Break_ErrFirst(t *testing.T) { - var p pipe - p.BreakWithError(io.EOF) - done := p.Done() - select { - case <-done: - default: - t.Fatal("should be done") - } -} - -func TestPipeCloseWithError(t *testing.T) { - p := &pipe{b: new(bytes.Buffer)} - const body = "foo" - io.WriteString(p, body) - a := errors.New("test error") - p.CloseWithError(a) - all, err := ioutil.ReadAll(p) - if string(all) != body { - t.Errorf("read bytes = %q; want %q", all, body) - } - if err != a { - t.Logf("read error = %v, %v", err, a) - } - // Read and Write should fail. 
- if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 { - t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite) - } - if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { - t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite) - } -} - -func TestPipeBreakWithError(t *testing.T) { - p := &pipe{b: new(bytes.Buffer)} - io.WriteString(p, "foo") - a := errors.New("test err") - p.BreakWithError(a) - all, err := ioutil.ReadAll(p) - if string(all) != "" { - t.Errorf("read bytes = %q; want empty string", all) - } - if err != a { - t.Logf("read error = %v, %v", err, a) - } - if p.b != nil { - t.Errorf("buffer should be nil after BreakWithError") - } - // Write should succeed silently. - if n, err := p.Write([]byte("abc")); err != nil || n != 3 { - t.Errorf("Write(abc) after break\ngot %v, %v\nwant 0, nil", n, err) - } - if p.b != nil { - t.Errorf("buffer should be nil after Write") - } - // Read should fail. - if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { - t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n) - } -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index eae143dd..00000000 --- a/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2857 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO: turn off the serve goroutine when idle, so -// an idle conn only has the readFrames goroutine active. (which could -// also be optimized probably to pin less memory in crypto/tls). This -// would involve tracking when the serve goroutine is active (atomic -// int32 read/CAS probably?) and starting it up when frames arrive, -// and shutting it down when all handlers exit. the occasional PING -// packets could use time.AfterFunc to call sc.wakeStartServeLoop() -// (which is a no-op if already running) and then queue the PING write -// as normal. The serve loop would then exit in most cases (if no -// Handlers running) and not be woken up again until the PING packet -// returns. - -// TODO (maybe): add a mechanism for Handlers to going into -// half-closed-local mode (rw.(io.Closer) test?) but not exit their -// handler, and continue to be able to read from the -// Request.Body. This would be a somewhat semantic change from HTTP/1 -// (or at least what we expose in net/http), so I'd probably want to -// add it there too. For now, this package says that returning from -// the Handler ServeHTTP function means you're both done reading and -// done writing, without a way to stop just one or the other. - -package http2 - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "math" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" -) - -const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? 
-) - -var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") -) - -var responseWriterStatePool = sync.Pool{ - New: func() interface{} { - rws := &responseWriterState{} - rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) - return rws - }, -} - -// Test hooks. -var ( - testHookOnConn func() - testHookGetServerConn func(*serverConn) - testHookOnPanicMu *sync.Mutex // nil except in tests - testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) -) - -// Server is an HTTP/2 server. -type Server struct { - // MaxHandlers limits the number of http.Handler ServeHTTP goroutines - // which may run at a time over all connections. - // Negative or zero no limit. - // TODO: implement - MaxHandlers int - - // MaxConcurrentStreams optionally specifies the number of - // concurrent streams that each client may have open at a - // time. This is unrelated to the number of http.Handler goroutines - // which may be active globally, which is MaxHandlers. - // If zero, MaxConcurrentStreams defaults to at least 100, per - // the HTTP/2 spec's recommendations. - MaxConcurrentStreams uint32 - - // MaxReadFrameSize optionally specifies the largest frame - // this server is willing to read. A valid value is between - // 16k and 16M, inclusive. If zero or otherwise invalid, a - // default value is used. - MaxReadFrameSize uint32 - - // PermitProhibitedCipherSuites, if true, permits the use of - // cipher suites prohibited by the HTTP/2 spec. - PermitProhibitedCipherSuites bool - - // IdleTimeout specifies how long until idle clients should be - // closed with a GOAWAY frame. PING frames are not considered - // activity for the purposes of IdleTimeout. - IdleTimeout time.Duration - - // MaxUploadBufferPerConnection is the size of the initial flow - // control window for each connections. The HTTP/2 spec does not - // allow this to be smaller than 65535 or larger than 2^32-1. - // If the value is outside this range, a default value will be - // used instead. - MaxUploadBufferPerConnection int32 - - // MaxUploadBufferPerStream is the size of the initial flow control - // window for each stream. The HTTP/2 spec does not allow this to - // be larger than 2^32-1. If the value is zero or larger than the - // maximum, a default value will be used instead. - MaxUploadBufferPerStream int32 - - // NewWriteScheduler constructs a write scheduler for a connection. - // If nil, a default scheduler is chosen. - NewWriteScheduler func() WriteScheduler - - // Internal state. This is a pointer (rather than embedded directly) - // so that we don't embed a Mutex in this struct, which will make the - // struct non-copyable, which might break some callers. 
- state *serverInternalState -} - -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection > initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -type serverInternalState struct { - mu sync.Mutex - activeConns map[*serverConn]struct{} -} - -func (s *serverInternalState) registerConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - s.activeConns[sc] = struct{}{} - s.mu.Unlock() -} - -func (s *serverInternalState) unregisterConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - delete(s.activeConns, sc) - s.mu.Unlock() -} - -func (s *serverInternalState) startGracefulShutdown() { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - for sc := range s.activeConns { - sc.startGracefulShutdown() - } - s.mu.Unlock() -} - -// ConfigureServer adds HTTP/2 support to a net/http Server. -// -// The configuration conf may be nil. -// -// ConfigureServer must be called before s begins serving. -func ConfigureServer(s *http.Server, conf *Server) error { - if s == nil { - panic("nil *http.Server") - } - if conf == nil { - conf = new(Server) - } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} - if err := configureServer18(s, conf); err != nil { - return err - } - if err := configureServer19(s, conf); err != nil { - return err - } - - if s.TLSConfig == nil { - s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { - haveRequired = true - } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } - } - if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") - } - } - - // Note: not setting MinVersion to tls.VersionTLS12, - // as we don't want to interfere with HTTP/1.1 traffic - // on the user's server. We enforce TLS 1.2 later once - // we accept a connection. Ideally this should be done - // during next-proto selection, but using TLS <1.2 with - // HTTP/2 is still the client's bug. 
- - s.TLSConfig.PreferServerCipherSuites = true - - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) - } - - if s.TLSNextProto == nil { - s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} - } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { - if testHookOnConn != nil { - testHookOnConn() - } - conf.ServeConn(c, &ServeConnOpts{ - Handler: h, - BaseConfig: hs, - }) - } - s.TLSNextProto[NextProtoTLS] = protoHandler - return nil -} - -// ServeConnOpts are options for the Server.ServeConn method. -type ServeConnOpts struct { - // BaseConfig optionally sets the base configuration - // for values. If nil, defaults are used. - BaseConfig *http.Server - - // Handler specifies which handler to use for processing - // requests. If nil, BaseConfig.Handler is used. If BaseConfig - // or BaseConfig.Handler is nil, http.DefaultServeMux is used. - Handler http.Handler -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. -func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - baseCtx, cancel := serverConnBaseContext(c, opts) - defer cancel() - - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - serveMsgCh: make(chan interface{}, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialStreamSendWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - - s.state.registerConn(sc) - defer s.state.unregisterConn(sc) - - // The net/http package sets the write deadline from the - // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already set. - // Write deadlines are set per stream in serverConn.newStream. 
- // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { - sc.conn.SetWriteDeadline(time.Time{}) - } - - if s.NewWriteScheduler != nil { - sc.writeSched = s.NewWriteScheduler() - } else { - sc.writeSched = NewRandomWriteScheduler() - } - - // These start at the RFC-specified defaults. If there is a higher - // configured value for inflow, that will be updated when we send a - // WINDOW_UPDATE shortly after sending SETTINGS. - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. An endpoint MUST - // immediately terminate an HTTP/2 connection that - // does not meet the TLS requirements described in - // this section with a connection error (Section - // 5.4.1) of type INADEQUATE_SECURITY. - if sc.tlsState.Version < tls.VersionTLS12 { - sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") - return - } - - if sc.tlsState.ServerName == "" { - // Client must use SNI, but we don't enforce that anymore, - // since it was causing problems when connecting to bare IP - // addresses during development. - // - // TODO: optionally enforce? Or enforce at the time we receive - // a new request, and verify the the ServerName matches the :authority? - // But that precludes proxy situations, perhaps. - // - // So for now, do nothing here again. - } - - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { - // "Endpoints MAY choose to generate a connection error - // (Section 5.4.1) of type INADEQUATE_SECURITY if one of - // the prohibited cipher suites are negotiated." - // - // We choose that. In my opinion, the spec is weak - // here. It also says both parties must support at least - // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no - // excuses here. If we really must, we could allow an - // "AllowInsecureWeakCiphers" option on the server later. - // Let's see how it plays out first. - sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) - return - } - } - - if hook := testHookGetServerConn; hook != nil { - hook(sc) - } - sc.serve() -} - -func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) - // ignoring errors. hanging up anyway. 
- sc.framer.WriteGoAway(0, err, []byte(debug)) - sc.bw.Flush() - sc.conn.Close() -} - -type serverConn struct { - // Immutable: - srv *Server - hs *http.Server - conn net.Conn - bw *bufferedWriter // writing to conn - handler http.Handler - baseCtx contextContext - framer *Framer - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http - remoteAddrStr string - writeSched WriteScheduler - - // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialStreamSendWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - - // Owned by the writeFrameAsync goroutine: - headerWriteBuf bytes.Buffer - hpackEncoder *hpack.Encoder - - // Used by startGracefulShutdown. - shutdownOnce sync.Once -} - -func (sc *serverConn) maxHeaderListSize() uint32 { - n := sc.hs.MaxHeaderBytes - if n <= 0 { - n = http.DefaultMaxHeaderBytes - } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) -} - -func (sc *serverConn) curOpenStreams() uint32 { - sc.serveG.check() - return sc.curClientStreams + sc.curPushedStreams -} - -// stream represents a stream. This is the minimal metadata needed by -// the serve goroutine. 
Most of the actual stream state is owned by -// the http.Handler's goroutine in the responseWriter. Because the -// responseWriter's responseWriterState is recycled at the end of a -// handler, this struct intentionally has no pointer to the -// *responseWriter{,State} itself, as the Handler ending nils out the -// responseWriter's state field. -type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext - cancelCtx func() - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - writeDeadline *time.Timer // nil if unused - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://tools.ietf.org/html/rfc7540#section-5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID%2 == 1 { - if streamID <= sc.maxClientStreamID { - return stateClosed, nil - } - } else { - if streamID <= sc.maxPushPromiseID { - return stateClosed, nil - } - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. 
-func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. -func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wr FrameWriteRequest // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. 
- if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, - }) - sc.unackedSettings++ - - // Each connection starts with intialWindowSize inflow tokens. - // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { - sc.sendWindowUpdate(nil, int(diff)) - } - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. - sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) - defer sc.idleTimer.Stop() - } - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) - defer settingsTimer.Stop() - - loopNum := 0 - for { - loopNum++ - select { - case wr := <-sc.wantWriteFrameCh: - if se, ok := wr.write.(StreamError); ok { - sc.resetStream(se) - break - } - sc.writeFrame(wr) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer != nil { - settingsTimer.Stop() - settingsTimer = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case msg := <-sc.serveMsgCh: - switch v := msg.(type) { - case func(int): - v(loopNum) // for testing - case *serverMessage: - switch v { - case settingsTimerMsg: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case idleTimerMsg: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case shutdownTimerMsg: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case gracefulShutdownMsg: - sc.startGracefulShutdownInternal() - default: - panic("unknown timer") - } - case *startPushRequest: - sc.startPush(v) - default: - panic(fmt.Sprintf("unexpected type %T", v)) - } - } - - if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { - return - } - } -} - -func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { - select { - case <-sc.doneServing: - case <-sharedCh: - close(privateCh) - } -} - -type serverMessage int - -// Message values sent to serveMsgCh. 
-var ( - settingsTimerMsg = new(serverMessage) - idleTimerMsg = new(serverMessage) - shutdownTimerMsg = new(serverMessage) - gracefulShutdownMsg = new(serverMessage) -) - -func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } -func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } -func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } - -func (sc *serverConn) sendServeMsg(msg interface{}) { - sc.serveG.checkNotOn() // NOT - select { - case sc.serveMsgCh <- msg: - case <-sc.doneServing: - } -} - -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? - defer timer.Stop() - select { - case <-timer.C: - return errors.New("timeout waiting for client preface") - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. -func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. -// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wr: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. 
-// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wr FrameWriteRequest) { - sc.serveG.check() - - // If true, wr will not be written and wr.done will not be signaled. - var ignoreWrite bool - - // We are not allowed to write frames on closed streams. RFC 7540 Section - // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on - // a closed stream." Our server never sends PRIORITY, so that exception - // does not apply. - // - // The serverConn might close an open stream while the stream's handler - // is still running. For example, the server might close a stream when it - // receives bad data from the client. If this happens, the handler might - // attempt to write a frame after the stream has been closed (since the - // handler hasn't yet been notified of the close). In this case, we simply - // ignore the frame. The handler will notice that the stream is closed when - // it waits for the frame to be written. - // - // As an exception to this rule, we allow sending RST_STREAM after close. - // This allows us to immediately reject new streams without tracking any - // state for those streams (except for the queued RST_STREAM frame). This - // may result in duplicate RST_STREAMs in some cases, but the client should - // ignore those. - if wr.StreamID() != 0 { - _, isReset := wr.write.(StreamError) - if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { - ignoreWrite = true - } - } - - // Don't send a 100-continue response if we've already sent headers. - // See golang.org/issue/14030. - switch wr.write.(type) { - case *writeResHeaders: - wr.stream.wroteHeaders = true - case write100ContinueHeadersFrame: - if wr.stream.wroteHeaders { - // We do not need to notify wr.done because this frame is - // never written with wr.done != nil. - if wr.done != nil { - panic("wr.done != nil for write100ContinueHeadersFrame") - } - ignoreWrite = true - } - } - - if !ignoreWrite { - sc.writeSched.Push(wr) - } - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wr (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wr. -func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wr.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - switch wr.write.(type) { - case StreamError, handlerPanicRST, writeWindowUpdate: - // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE - // in this state. (We never send PRIORITY from the server, so that is not checked.) 
- default: - panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) - } - case stateClosed: - panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) - } - } - if wpp, ok := wr.write.(*writePushPromise); ok { - var err error - wpp.promisedID, err = wpp.allocatePromisedID() - if err != nil { - sc.writingFrameAsync = false - wr.replyToWriter(err) - return - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - if wr.write.staysWithinBuffer(sc.bw.Available()) { - sc.writingFrameAsync = false - err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) - } else { - sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) - } -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") - -// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. -func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - sc.writingFrameAsync = false - - wr := res.wr - - if writeEndsStream(wr.write) { - st := wr.stream - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for closing a ResponseWriter while still - // reading data (see possible TODO at top of - // this file), we go into closed state here - // anyway, after telling the peer we're - // hanging up on them. We'll transition to - // stateClosed after the RST_STREAM frame is - // written. - st.state = stateHalfClosedLocal - // Section 8.1: a server MAY request that the client abort - // transmission of a request without error by sending a - // RST_STREAM with an error code of NO_ERROR after sending - // a complete response. - sc.resetStream(streamError(st.id, ErrCodeNo)) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } else { - switch v := wr.write.(type) { - case StreamError: - // st may be unknown if the RST_STREAM was generated to reject bad input. - if st, ok := sc.streams[v.StreamID]; ok { - sc.closeStream(st, v) - } - case handlerPanicRST: - sc.closeStream(wr.stream, errHandlerPanicked) - } - } - - // Reply (if requested) to unblock the ServeHTTP goroutine. - wr.replyToWriter(res.err) - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. -// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. -// -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. 
-func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame || sc.inFrameScheduleLoop { - return - } - sc.inFrameScheduleLoop = true - for !sc.writingFrameAsync { - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(FrameWriteRequest{ - write: &writeGoAway{ - maxStreamID: sc.maxClientStreamID, - code: sc.goAwayCode, - }, - }) - continue - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) - continue - } - if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { - if wr, ok := sc.writeSched.Pop(); ok { - sc.startFrameWrite(wr) - continue - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - continue - } - break - } - sc.inFrameScheduleLoop = false -} - -// startGracefulShutdown gracefully shuts down a connection. This -// sends GOAWAY with ErrCodeNo to tell the client we're gracefully -// shutting down. The connection isn't closed until all current -// streams are done. -// -// startGracefulShutdown returns immediately; it does not wait until -// the connection has shut down. -func (sc *serverConn) startGracefulShutdown() { - sc.serveG.checkNotOn() // NOT - sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) -} - -func (sc *serverConn) startGracefulShutdownInternal() { - sc.goAwayIn(ErrCodeNo, 0) -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - var forceCloseIn time.Duration - if code != ErrCodeNo { - forceCloseIn = 250 * time.Millisecond - } else { - // TODO: configurable - forceCloseIn = 1 * time.Second - } - sc.goAwayIn(code, forceCloseIn) -} - -func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { - sc.serveG.check() - if sc.inGoAway { - return - } - if forceCloseIn != 0 { - sc.shutDownIn(forceCloseIn) - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(FrameWriteRequest{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.resetQueued = true - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. 
- return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. - if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *GoAwayFrame: - return sc.processGoAway(f) - case *PushPromiseFrame: - // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - state, st := sc.state(f.StreamID) - if state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." 
- return nil - } - if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.cancelCtx() - sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - if st.writeDeadline != nil { - st.writeDeadline.Stop() - } - if st.isPushed() { - sc.curPushedStreams-- - } else { - sc.curClientStreams-- - } - delete(sc.streams, st.id) - if len(sc.streams) == 0 { - sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer.Reset(sc.srv.IdleTimeout) - } - if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdownInternal() - } - } - if p := st.body; p != nil { - // Return any buffered unread bytes worth of conn-level flow control. - // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) - - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.CloseStream(st.id) -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. 
When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialStreamSendWindowSize - sc.initialStreamSendWindowSize = int32(val) - growth := int32(val) - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - data := f.Data() - - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID - state, st := sc.state(id) - if id == 0 || state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - - // But still enforce their connection-level flow control, - // and return any flow control bytes since we're not going - // to consume them. - if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level - - if st != nil && st.resetQueued { - // Already have a stream error in flight. Don't send another. - return nil - } - return streamError(id, ErrCodeStreamClosed) - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) - } - if f.Length > 0 { - // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - st.inflow.take(int32(f.Length)) - - if len(data) > 0 { - wrote, err := st.body.Write(data) - if err != nil { - return streamError(id, ErrCodeStreamClosed) - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - - // Return any padded flow control now, since we won't - // refund it later on body reads. 
- if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -func (sc *serverConn) processGoAway(f *GoAwayFrame) error { - sc.serveG.check() - if f.ErrCode != ErrCodeNo { - sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } else { - sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } - sc.startGracefulShutdownInternal() - // http://tools.ietf.org/html/rfc7540#section-6.8 - // We should not create any new streams, which means we should disable push. - sc.pushEnabled = false - return nil -} - -// isPushed reports whether the stream is server-initiated. -func (st *stream) isPushed() bool { - return st.id%2 == 0 -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. -func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -// onWriteTimeout is run on its own goroutine (from time.AfterFunc) -// when the stream's WriteTimeout has fired. -func (st *stream) onWriteTimeout() { - st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } - // http://tools.ietf.org/html/rfc7540#section-5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - if st := sc.streams[f.StreamID]; st != nil { - if st.resetQueued { - // We're sending RST_STREAM to close the stream, so don't bother - // processing this frame. - return nil - } - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxClientStreamID = id - - if sc.idleTimer != nil { - sc.idleTimer.Stop() - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.2 - // [...] Endpoints MUST NOT exceed the limit set by their peer. 
An - // endpoint that receives a HEADERS frame that causes their - // advertised concurrent stream limit to be exceeded MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR - // or REFUSED_STREAM. - if sc.curClientStreams+1 > sc.advMaxStreams { - if sc.unackedSettings == 0 { - // They should know better. - return streamError(id, ErrCodeProtocol) - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return streamError(id, ErrCodeRefusedStream) - } - - initialState := stateOpen - if f.StreamEnded() { - initialState = stateHalfClosedRemote - } - st := sc.newStream(id, 0, initialState) - - if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { - return err - } - sc.writeSched.AdjustStream(st.id, f.Priority) - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { - handler = new400Handler(err) - } - - // The net/http package sets the read deadline from the - // http.Server.ReadTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here after the request headers are read, - // similar to how the http1 server works. Here it's - // technically more like the http1 Server's ReadHeaderTimeout - // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { - sc.conn.SetReadDeadline(time.Time{}) - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) - } - - if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { - // TODO: send more details to the peer somehow. But http2 has - // no way to send debug data at a stream level. Discuss with - // HTTP folk. - return streamError(st.id, ErrCodeProtocol) - } - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func checkPriority(streamID uint32, p PriorityParam) error { - if streamID == p.StreamDep { - // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." - // Section 5.3.3 says that a stream can depend on one of its dependencies, - // so it's only self-dependencies that are forbidden. 
- return streamError(streamID, ErrCodeProtocol) - } - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { - return err - } - sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) - return nil -} - -func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { - sc.serveG.check() - if id == 0 { - panic("internal error: cannot create stream with id 0") - } - - ctx, cancelCtx := contextWithCancel(sc.baseCtx) - st := &stream{ - sc: sc, - id: id, - state: state, - ctx: ctx, - cancelCtx: cancelCtx, - } - st.cw.Init() - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) - } - - sc.streams[id] = st - sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) - if st.isPushed() { - sc.curPushedStreams++ - } else { - sc.curClientStreams++ - } - if sc.curOpenStreams() == 1 { - sc.setConnState(http.StateActive) - } - - return st -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - } - - isConnect := rp.method == "CONNECT" - if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." 
- // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - rp.header = make(http.Header) - for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") - } - - rw, req, err := sc.newWriterAndRequestNoBody(st, rp) - if err != nil { - return nil, nil, err - } - if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - req.Body.(*requestBody).pipe = &pipe{ - b: &dataBuffer{expected: req.ContentLength}, - } - } - return rw, req, nil -} - -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { - tlsState = sc.tlsState - } - - needsContinue := rp.header.Get("Expect") == "100-continue" - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) - } - requestURI = rp.path - } - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - req := &http.Request{ - Method: rp.method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: rp.authority, - Body: body, - Trailer: trailer, - } - req = requestWithContext(req, st.ctx) - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -// Run on its own goroutine. 
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() - if didPanic { - e := recover() - sc.writeFrameFromHandler(FrameWriteRequest{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - // Same as net/http: - if shouldLogPanic(e) { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - } - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
    ") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), so we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys, before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(FrameWriteRequest{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { - sc.serveG.checkNotOn() // NOT on - if n > 0 { - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -// requestBody is the Handler's Request.Body type. -// Read and Close may be called concurrently. 
-type requestBody struct { - stream *stream - conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil || b.sawEOF { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if err == io.EOF { - b.sawEOF = true - } - if b.conn == nil && inTests { - return - } - b.conn.noteBodyReadFromHandler(b.stream, n, err) - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. -func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { - // Forbidden by RFC 2616 14.40. - rws.conn.logf("ignoring invalid trailer %q", k) - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. -// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. 
-func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { - ctype = http.DetectContentType(p) - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - rws.dirty = true - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - endStream := rws.handlerDone && !rws.hasTrailers() - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true - return 0, err - } - } - - if rws.handlerDone && rws.hasTrailers() { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - if err != nil { - rws.dirty = true - } - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. -// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. 
When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. That worked for a while, until we found the first major -// user of Trailers in the wild: gRPC (using them only over http2), -// and gRPC libraries permit setting trailers mid-stream without -// predeclarnig them. So: change of plans. We still permit the old -// way, but we also permit this hack: if a Header() key begins with -// "Trailer:", the suffix of that key is a Trailer. Because ':' is an -// invalid token byte anyway, there is no ambiguity. (And it's already -// filtered out) It's mildly hacky, but not terrible. -// -// This method runs after the Handler is done and promotes any Header -// fields to be trailers. -func (rws *responseWriterState) promoteUndeclaredTrailers() { - for k, vv := range rws.handlerHeader { - if !strings.HasPrefix(k, TrailerPrefix) { - continue - } - trailerKey := strings.TrimPrefix(k, TrailerPrefix) - rws.declareTrailer(trailerKey) - rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv - } - - if len(rws.trailers) > 1 { - sorter := sorterPool.Get().(*sorter) - sorter.SortStrings(rws.trailers) - sorterPool.Put(sorter) - } -} - -func (w *responseWriter) Flush() { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } - } else { - // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it - // ourselves to force the HTTP response header and/or - // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) - } -} - -func (w *responseWriter) CloseNotify() <-chan bool { - rws := w.rws - if rws == nil { - panic("CloseNotify called after Handler finished") - } - rws.closeNotifierMu.Lock() - ch := rws.closeNotifierCh - if ch == nil { - ch = make(chan bool, 1) - rws.closeNotifierCh = ch - cw := rws.stream.cw - go func() { - cw.Wait() // wait for close - ch <- true - }() - } - rws.closeNotifierMu.Unlock() - return ch -} - -func (w *responseWriter) Header() http.Header { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.handlerHeader == nil { - rws.handlerHeader = make(http.Header) - } - return rws.handlerHeader -} - -func (w *responseWriter) WriteHeader(code int) { - rws := w.rws - if rws == nil { - panic("WriteHeader called after Handler finished") - } - rws.writeHeader(code) -} - -func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) - } - } -} - -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// The Life Of A Write is like this: -// -// * Handler calls w.Write or w.WriteString -> -// * -> rws.bw (*bufio.Writer) -> -// * (Handler might call Flush) -// * -> chunkWriter{rws} -// * -> responseWriterState.writeChunk(p []byte) -// * -> responseWriterState.writeChunk (most of the magic; see comment there) -func (w *responseWriter) Write(p []byte) (n int, err error) { - return w.write(len(p), p, "") -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - return w.write(len(s), nil, s) -} - -// either dataB or dataS is non-zero. 
-func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - dirty := rws.dirty - rws.handlerDone = true - w.Flush() - w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } -} - -// Push errors. -var ( - ErrRecursivePush = errors.New("http2: recursive push not allowed") - ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") -) - -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. -type pushOptions struct { - Method string - Header http.Header -} - -func (w *responseWriter) push(target string, opts pushOptions) error { - st := w.rws.stream - sc := st.sc - sc.serveG.checkNotOn() - - // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." - // http://tools.ietf.org/html/rfc7540#section-6.6 - if st.isPushed() { - return ErrRecursivePush - } - - // Default options. - if opts.Method == "" { - opts.Method = "GET" - } - if opts.Header == nil { - opts.Header = http.Header{} - } - wantScheme := "http" - if w.rws.req.TLS != nil { - wantScheme = "https" - } - - // Validate the request. - u, err := url.Parse(target) - if err != nil { - return err - } - if u.Scheme == "" { - if !strings.HasPrefix(target, "/") { - return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) - } - u.Scheme = wantScheme - u.Host = w.rws.req.Host - } else { - if u.Scheme != wantScheme { - return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) - } - if u.Host == "" { - return errors.New("URL must have a host") - } - } - for k := range opts.Header { - if strings.HasPrefix(k, ":") { - return fmt.Errorf("promised request headers cannot include pseudo header %q", k) - } - // These headers are meaningful only if the request has a body, - // but PUSH_PROMISE requests cannot have a body. - // http://tools.ietf.org/html/rfc7540#section-8.2 - // Also disallow Host, since the promised URL must be absolute. 
- switch strings.ToLower(k) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": - return fmt.Errorf("promised request headers cannot include %q", k) - } - } - if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { - return err - } - - // The RFC effectively limits promised requests to GET and HEAD: - // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" - // http://tools.ietf.org/html/rfc7540#section-8.2 - if opts.Method != "GET" && opts.Method != "HEAD" { - return fmt.Errorf("method %q must be GET or HEAD", opts.Method) - } - - msg := &startPushRequest{ - parent: st, - method: opts.Method, - url: u, - header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case sc.serveMsgCh <- msg: - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case err := <-msg.done: - errChanPool.Put(msg.done) - return err - } -} - -type startPushRequest struct { - parent *stream - method string - url *url.URL - header http.Header - done chan error -} - -func (sc *serverConn) startPush(msg *startPushRequest) { - sc.serveG.check() - - // http://tools.ietf.org/html/rfc7540#section-6.6. - // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that - // is in either the "open" or "half-closed (remote)" state. - if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { - // responseWriter.Push checks that the stream is peer-initiaed. - msg.done <- errStreamClosed - return - } - - // http://tools.ietf.org/html/rfc7540#section-6.6. - if !sc.pushEnabled { - msg.done <- http.ErrNotSupported - return - } - - // PUSH_PROMISE frames must be sent in increasing order by stream ID, so - // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE - // is written. Once the ID is allocated, we start the request handler. - allocatePromisedID := func() (uint32, error) { - sc.serveG.check() - - // Check this again, just in case. Technically, we might have received - // an updated SETTINGS by the time we got around to writing this frame. - if !sc.pushEnabled { - return 0, http.ErrNotSupported - } - // http://tools.ietf.org/html/rfc7540#section-6.5.2. - if sc.curPushedStreams+1 > sc.clientMaxStreams { - return 0, ErrPushLimitReached - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.1. - // Streams initiated by the server MUST use even-numbered identifiers. - // A server that is unable to establish a new stream identifier can send a GOAWAY - // frame so that the client is forced to open a new connection for new streams. - if sc.maxPushPromiseID+2 >= 1<<31 { - sc.startGracefulShutdownInternal() - return 0, ErrPushLimitReached - } - sc.maxPushPromiseID += 2 - promisedID := sc.maxPushPromiseID - - // http://tools.ietf.org/html/rfc7540#section-8.2. - // Strictly speaking, the new stream should start in "reserved (local)", then - // transition to "half closed (remote)" after sending the initial HEADERS, but - // we start in "half closed (remote)" for simplicity. - // See further comments at the definition of stateHalfClosedRemote. 
- promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE - }) - if err != nil { - // Should not happen, since we've already validated msg.url. - panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) - } - - go sc.runHandler(rw, req, sc.handler.ServeHTTP) - return promisedID, nil - } - - sc.writeFrame(FrameWriteRequest{ - write: &writePushPromise{ - streamID: msg.parent.id, - method: msg.method, - url: msg.url, - h: msg.header, - allocatePromisedID: allocatePromisedID, - }, - stream: msg.parent, - done: msg.done, - }) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} - -// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 -var connHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Connection", - "Transfer-Encoding", - "Upgrade", -} - -// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, -// per RFC 7540 Section 8.1.2.2. -// The returned error is reported to users. -func checkValidHTTP2RequestHeaders(h http.Header) error { - for _, k := range connHeaders { - if _, ok := h[k]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", k) - } - } - te := h["Te"] - if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { - return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) - } - return nil -} - -func new400Handler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - http.Error(w, err.Error(), http.StatusBadRequest) - } -} - -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - -// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives -// disabled. See comments on h1ServerShutdownChan above for why -// the code is written this way. 
-func h1ServerKeepAlivesDisabled(hs *http.Server) bool { - var x interface{} = hs - type I interface { - doKeepAlives() bool - } - if hs, ok := x.(I); ok { - return !hs.doKeepAlives() - } - return false -} diff --git a/vendor/golang.org/x/net/http2/server_push_test.go b/vendor/golang.org/x/net/http2/server_push_test.go deleted file mode 100644 index 918fd30d..00000000 --- a/vendor/golang.org/x/net/http2/server_push_test.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package http2 - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "sync" - "testing" - "time" -) - -func TestServer_Push_Success(t *testing.T) { - const ( - mainBody = "index page" - pushedBody = "pushed page" - userAgent = "testagent" - cookie = "testcookie" - ) - - var stURL string - checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error { - if got, want := r.Method, wantMethod; got != want { - return fmt.Errorf("promised Req.Method=%q, want %q", got, want) - } - if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) { - return fmt.Errorf("promised Req.Header=%q, want %q", got, want) - } - if got, want := "https://"+r.Host, stURL; got != want { - return fmt.Errorf("promised Req.Host=%q, want %q", got, want) - } - if r.Body == nil { - return fmt.Errorf("nil Body") - } - if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { - return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) - } - return nil - } - - errc := make(chan error, 3) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - switch r.URL.RequestURI() { - case "/": - // Push "/pushed?get" as a GET request, using an absolute URL. - opt := &http.PushOptions{ - Header: http.Header{ - "User-Agent": {userAgent}, - }, - } - if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil { - errc <- fmt.Errorf("error pushing /pushed?get: %v", err) - return - } - // Push "/pushed?head" as a HEAD request, using a path. - opt = &http.PushOptions{ - Method: "HEAD", - Header: http.Header{ - "User-Agent": {userAgent}, - "Cookie": {cookie}, - }, - } - if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil { - errc <- fmt.Errorf("error pushing /pushed?head: %v", err) - return - } - w.Header().Set("Content-Type", "text/html") - w.Header().Set("Content-Length", strconv.Itoa(len(mainBody))) - w.WriteHeader(200) - io.WriteString(w, mainBody) - errc <- nil - - case "/pushed?get": - wantH := http.Header{} - wantH.Set("User-Agent", userAgent) - if err := checkPromisedReq(r, "GET", wantH); err != nil { - errc <- fmt.Errorf("/pushed?get: %v", err) - return - } - w.Header().Set("Content-Type", "text/html") - w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody))) - w.WriteHeader(200) - io.WriteString(w, pushedBody) - errc <- nil - - case "/pushed?head": - wantH := http.Header{} - wantH.Set("User-Agent", userAgent) - wantH.Set("Cookie", cookie) - if err := checkPromisedReq(r, "HEAD", wantH); err != nil { - errc <- fmt.Errorf("/pushed?head: %v", err) - return - } - w.WriteHeader(204) - errc <- nil - - default: - errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) - } - }) - stURL = st.ts.URL - - // Send one request, which should push two responses. 
- st.greet() - getSlash(st) - for k := 0; k < 3; k++ { - select { - case <-time.After(2 * time.Second): - t.Errorf("timeout waiting for handler %d to finish", k) - case err := <-errc: - if err != nil { - t.Fatal(err) - } - } - } - - checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error { - pp, ok := f.(*PushPromiseFrame) - if !ok { - return fmt.Errorf("got a %T; want *PushPromiseFrame", f) - } - if !pp.HeadersEnded() { - return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame") - } - if got, want := pp.PromiseID, promiseID; got != want { - return fmt.Errorf("got PromiseID %v; want %v", got, want) - } - gotH := st.decodeHeader(pp.HeaderBlockFragment()) - if !reflect.DeepEqual(gotH, wantH) { - return fmt.Errorf("got promised headers %v; want %v", gotH, wantH) - } - return nil - } - checkHeaders := func(f Frame, wantH [][2]string) error { - hf, ok := f.(*HeadersFrame) - if !ok { - return fmt.Errorf("got a %T; want *HeadersFrame", f) - } - gotH := st.decodeHeader(hf.HeaderBlockFragment()) - if !reflect.DeepEqual(gotH, wantH) { - return fmt.Errorf("got response headers %v; want %v", gotH, wantH) - } - return nil - } - checkData := func(f Frame, wantData string) error { - df, ok := f.(*DataFrame) - if !ok { - return fmt.Errorf("got a %T; want *DataFrame", f) - } - if gotData := string(df.Data()); gotData != wantData { - return fmt.Errorf("got response data %q; want %q", gotData, wantData) - } - return nil - } - - // Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA - // Stream 2 has HEADERS + DATA - // Stream 4 has HEADERS - expected := map[uint32][]func(Frame) error{ - 1: { - func(f Frame) error { - return checkPushPromise(f, 2, [][2]string{ - {":method", "GET"}, - {":scheme", "https"}, - {":authority", st.ts.Listener.Addr().String()}, - {":path", "/pushed?get"}, - {"user-agent", userAgent}, - }) - }, - func(f Frame) error { - return checkPushPromise(f, 4, [][2]string{ - {":method", "HEAD"}, - {":scheme", "https"}, - {":authority", st.ts.Listener.Addr().String()}, - {":path", "/pushed?head"}, - {"cookie", cookie}, - {"user-agent", userAgent}, - }) - }, - func(f Frame) error { - return checkHeaders(f, [][2]string{ - {":status", "200"}, - {"content-type", "text/html"}, - {"content-length", strconv.Itoa(len(mainBody))}, - }) - }, - func(f Frame) error { - return checkData(f, mainBody) - }, - }, - 2: { - func(f Frame) error { - return checkHeaders(f, [][2]string{ - {":status", "200"}, - {"content-type", "text/html"}, - {"content-length", strconv.Itoa(len(pushedBody))}, - }) - }, - func(f Frame) error { - return checkData(f, pushedBody) - }, - }, - 4: { - func(f Frame) error { - return checkHeaders(f, [][2]string{ - {":status", "204"}, - }) - }, - }, - } - - consumed := map[uint32]int{} - for k := 0; len(expected) > 0; k++ { - f, err := st.readFrame() - if err != nil { - for id, left := range expected { - t.Errorf("stream %d: missing %d frames", id, len(left)) - } - t.Fatalf("readFrame %d: %v", k, err) - } - id := f.Header().StreamID - label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) - if len(expected[id]) == 0 { - t.Fatalf("%s: unexpected frame %#+v", label, f) - } - check := expected[id][0] - expected[id] = expected[id][1:] - if len(expected[id]) == 0 { - delete(expected, id) - } - if err := check(f); err != nil { - t.Fatalf("%s: %v", label, err) - } - consumed[id]++ - } -} - -func TestServer_Push_SuccessNoRace(t *testing.T) { - // Regression test for issue #18326. 
Ensure the request handler can mutate - // pushed request headers without racing with the PUSH_PROMISE write. - errc := make(chan error, 2) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - switch r.URL.RequestURI() { - case "/": - opt := &http.PushOptions{ - Header: http.Header{"User-Agent": {"testagent"}}, - } - if err := w.(http.Pusher).Push("/pushed", opt); err != nil { - errc <- fmt.Errorf("error pushing: %v", err) - return - } - w.WriteHeader(200) - errc <- nil - - case "/pushed": - // Update request header, ensure there is no race. - r.Header.Set("User-Agent", "newagent") - r.Header.Set("Cookie", "cookie") - w.WriteHeader(200) - errc <- nil - - default: - errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) - } - }) - - // Send one request, which should push one response. - st.greet() - getSlash(st) - for k := 0; k < 2; k++ { - select { - case <-time.After(2 * time.Second): - t.Errorf("timeout waiting for handler %d to finish", k) - case err := <-errc: - if err != nil { - t.Fatal(err) - } - } - } -} - -func TestServer_Push_RejectRecursivePush(t *testing.T) { - // Expect two requests, but might get three if there's a bug and the second push succeeds. - errc := make(chan error, 3) - handler := func(w http.ResponseWriter, r *http.Request) error { - baseURL := "https://" + r.Host - switch r.URL.Path { - case "/": - if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil { - return fmt.Errorf("first Push()=%v, want nil", err) - } - return nil - - case "/push1": - if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want { - return fmt.Errorf("Push()=%v, want %v", got, want) - } - return nil - - default: - return fmt.Errorf("unexpected path: %q", r.URL.Path) - } - } - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - errc <- handler(w, r) - }) - defer st.Close() - st.greet() - getSlash(st) - if err := <-errc; err != nil { - t.Errorf("First request failed: %v", err) - } - if err := <-errc; err != nil { - t.Errorf("Second request failed: %v", err) - } -} - -func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) { - // Expect one request, but might get two if there's a bug and the push succeeds. - errc := make(chan error, 2) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - errc <- doPush(w.(http.Pusher), r) - }) - defer st.Close() - st.greet() - if err := st.fr.WriteSettings(settings...); err != nil { - st.t.Fatalf("WriteSettings: %v", err) - } - st.wantSettingsAck() - getSlash(st) - if err := <-errc; err != nil { - t.Error(err) - } - // Should not get a PUSH_PROMISE frame. 
- hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Error("stream should end after headers") - } -} - -func TestServer_Push_RejectIfDisabled(t *testing.T) { - testServer_Push_RejectSingleRequest(t, - func(p http.Pusher, r *http.Request) error { - if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { - return fmt.Errorf("Push()=%v, want %v", got, want) - } - return nil - }, - Setting{SettingEnablePush, 0}) -} - -func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) { - testServer_Push_RejectSingleRequest(t, - func(p http.Pusher, r *http.Request) error { - if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want { - return fmt.Errorf("Push()=%v, want %v", got, want) - } - return nil - }, - Setting{SettingMaxConcurrentStreams, 0}) -} - -func TestServer_Push_RejectWrongScheme(t *testing.T) { - testServer_Push_RejectSingleRequest(t, - func(p http.Pusher, r *http.Request) error { - if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil { - return errors.New("Push() should have failed (push target URL is http)") - } - return nil - }) -} - -func TestServer_Push_RejectMissingHost(t *testing.T) { - testServer_Push_RejectSingleRequest(t, - func(p http.Pusher, r *http.Request) error { - if err := p.Push("https:pushed", nil); err == nil { - return errors.New("Push() should have failed (push target URL missing host)") - } - return nil - }) -} - -func TestServer_Push_RejectRelativePath(t *testing.T) { - testServer_Push_RejectSingleRequest(t, - func(p http.Pusher, r *http.Request) error { - if err := p.Push("../test", nil); err == nil { - return errors.New("Push() should have failed (push target is a relative path)") - } - return nil - }) -} - -func TestServer_Push_RejectForbiddenMethod(t *testing.T) { - testServer_Push_RejectSingleRequest(t, - func(p http.Pusher, r *http.Request) error { - if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil { - return errors.New("Push() should have failed (cannot promise a POST)") - } - return nil - }) -} - -func TestServer_Push_RejectForbiddenHeader(t *testing.T) { - testServer_Push_RejectSingleRequest(t, - func(p http.Pusher, r *http.Request) error { - header := http.Header{ - "Content-Length": {"10"}, - "Content-Encoding": {"gzip"}, - "Trailer": {"Foo"}, - "Te": {"trailers"}, - "Host": {"test.com"}, - ":authority": {"test.com"}, - } - if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil { - return errors.New("Push() should have failed (forbidden headers)") - } - return nil - }) -} - -func TestServer_Push_StateTransitions(t *testing.T) { - const body = "foo" - - gotPromise := make(chan bool) - finishedPush := make(chan bool) - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - switch r.URL.RequestURI() { - case "/": - if err := w.(http.Pusher).Push("/pushed", nil); err != nil { - t.Errorf("Push error: %v", err) - } - // Don't finish this request until the push finishes so we don't - // nondeterministically interleave output frames with the push. 
- <-finishedPush - case "/pushed": - <-gotPromise - } - w.Header().Set("Content-Type", "text/html") - w.Header().Set("Content-Length", strconv.Itoa(len(body))) - w.WriteHeader(200) - io.WriteString(w, body) - }) - defer st.Close() - - st.greet() - if st.stream(2) != nil { - t.Fatal("stream 2 should be empty") - } - if got, want := st.streamState(2), stateIdle; got != want { - t.Fatalf("streamState(2)=%v, want %v", got, want) - } - getSlash(st) - // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. - st.wantPushPromise() - if got, want := st.streamState(2), stateHalfClosedRemote; got != want { - t.Fatalf("streamState(2)=%v, want %v", got, want) - } - // We stall the HTTP handler for "/pushed" until the above check. If we don't - // stall the handler, then the handler might write HEADERS and DATA and finish - // the stream before we check st.streamState(2) -- should that happen, we'll - // see stateClosed and fail the above check. - close(gotPromise) - st.wantHeaders() - if df := st.wantData(); !df.StreamEnded() { - t.Fatal("expected END_STREAM flag on DATA") - } - if got, want := st.streamState(2), stateClosed; got != want { - t.Fatalf("streamState(2)=%v, want %v", got, want) - } - close(finishedPush) -} - -func TestServer_Push_RejectAfterGoAway(t *testing.T) { - var readyOnce sync.Once - ready := make(chan struct{}) - errc := make(chan error, 2) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - select { - case <-ready: - case <-time.After(5 * time.Second): - errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed") - } - if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { - errc <- fmt.Errorf("Push()=%v, want %v", got, want) - } - errc <- nil - }) - defer st.Close() - st.greet() - getSlash(st) - - // Send GOAWAY and wait for it to be processed. - st.fr.WriteGoAway(1, ErrCodeNo, nil) - go func() { - for { - select { - case <-ready: - return - default: - } - st.sc.serveMsgCh <- func(loopNum int) { - if !st.sc.pushEnabled { - readyOnce.Do(func() { close(ready) }) - } - } - } - }() - if err := <-errc; err != nil { - t.Error(err) - } -} diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go deleted file mode 100644 index b4e83289..00000000 --- a/vendor/golang.org/x/net/http2/server_test.go +++ /dev/null @@ -1,3721 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "crypto/tls" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "golang.org/x/net/http2/hpack" -) - -var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") - -func stderrv() io.Writer { - if *stderrVerbose { - return os.Stderr - } - - return ioutil.Discard -} - -type serverTester struct { - cc net.Conn // client conn - t testing.TB - ts *httptest.Server - fr *Framer - serverLogBuf bytes.Buffer // logger for httptest.Server - logFilter []string // substrings to filter out - scMu sync.Mutex // guards sc - sc *serverConn - hpackDec *hpack.Decoder - decodedHeaders [][2]string - - // If http2debug!=2, then we capture Frame debug logs that will be written - // to t.Log after a test fails. 
The read and write logs use separate locks - // and buffers so we don't accidentally introduce synchronization between - // the read and write goroutines, which may hide data races. - frameReadLogMu sync.Mutex - frameReadLogBuf bytes.Buffer - frameWriteLogMu sync.Mutex - frameWriteLogBuf bytes.Buffer - - // writing headers: - headerBuf bytes.Buffer - hpackEnc *hpack.Encoder -} - -func init() { - testHookOnPanicMu = new(sync.Mutex) -} - -func resetHooks() { - testHookOnPanicMu.Lock() - testHookOnPanic = nil - testHookOnPanicMu.Unlock() -} - -type serverTesterOpt string - -var optOnlyServer = serverTesterOpt("only_server") -var optQuiet = serverTesterOpt("quiet_logging") -var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") - -func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { - resetHooks() - - ts := httptest.NewUnstartedServer(handler) - - tlsConfig := &tls.Config{ - InsecureSkipVerify: true, - NextProtos: []string{NextProtoTLS}, - } - - var onlyServer, quiet, framerReuseFrames bool - h2server := new(Server) - for _, opt := range opts { - switch v := opt.(type) { - case func(*tls.Config): - v(tlsConfig) - case func(*httptest.Server): - v(ts) - case func(*Server): - v(h2server) - case serverTesterOpt: - switch v { - case optOnlyServer: - onlyServer = true - case optQuiet: - quiet = true - case optFramerReuseFrames: - framerReuseFrames = true - } - case func(net.Conn, http.ConnState): - ts.Config.ConnState = v - default: - t.Fatalf("unknown newServerTester option type %T", v) - } - } - - ConfigureServer(ts.Config, h2server) - - st := &serverTester{ - t: t, - ts: ts, - } - st.hpackEnc = hpack.NewEncoder(&st.headerBuf) - st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) - - ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config - if quiet { - ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) - } else { - ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) - } - ts.StartTLS() - - if VerboseLogs { - t.Logf("Running test server at: %s", ts.URL) - } - testHookGetServerConn = func(v *serverConn) { - st.scMu.Lock() - defer st.scMu.Unlock() - st.sc = v - } - log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) - if !onlyServer { - cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) - if err != nil { - t.Fatal(err) - } - st.cc = cc - st.fr = NewFramer(cc, cc) - if framerReuseFrames { - st.fr.SetReuseFrames() - } - if !logFrameReads && !logFrameWrites { - st.fr.debugReadLoggerf = func(m string, v ...interface{}) { - m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" - st.frameReadLogMu.Lock() - fmt.Fprintf(&st.frameReadLogBuf, m, v...) - st.frameReadLogMu.Unlock() - } - st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { - m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" - st.frameWriteLogMu.Lock() - fmt.Fprintf(&st.frameWriteLogBuf, m, v...) 
- st.frameWriteLogMu.Unlock() - } - st.fr.logReads = true - st.fr.logWrites = true - } - } - return st -} - -func (st *serverTester) closeConn() { - st.scMu.Lock() - defer st.scMu.Unlock() - st.sc.conn.Close() -} - -func (st *serverTester) addLogFilter(phrase string) { - st.logFilter = append(st.logFilter, phrase) -} - -func (st *serverTester) stream(id uint32) *stream { - ch := make(chan *stream, 1) - st.sc.serveMsgCh <- func(int) { - ch <- st.sc.streams[id] - } - return <-ch -} - -func (st *serverTester) streamState(id uint32) streamState { - ch := make(chan streamState, 1) - st.sc.serveMsgCh <- func(int) { - state, _ := st.sc.state(id) - ch <- state - } - return <-ch -} - -// loopNum reports how many times this conn's select loop has gone around. -func (st *serverTester) loopNum() int { - lastc := make(chan int, 1) - st.sc.serveMsgCh <- func(loopNum int) { - lastc <- loopNum - } - return <-lastc -} - -// awaitIdle heuristically awaits for the server conn's select loop to be idle. -// The heuristic is that the server connection's serve loop must schedule -// 50 times in a row without any channel sends or receives occurring. -func (st *serverTester) awaitIdle() { - remain := 50 - last := st.loopNum() - for remain > 0 { - n := st.loopNum() - if n == last+1 { - remain-- - } else { - remain = 50 - } - last = n - } -} - -func (st *serverTester) Close() { - if st.t.Failed() { - st.frameReadLogMu.Lock() - if st.frameReadLogBuf.Len() > 0 { - st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String()) - } - st.frameReadLogMu.Unlock() - - st.frameWriteLogMu.Lock() - if st.frameWriteLogBuf.Len() > 0 { - st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String()) - } - st.frameWriteLogMu.Unlock() - - // If we failed already (and are likely in a Fatal, - // unwindowing), force close the connection, so the - // httptest.Server doesn't wait forever for the conn - // to close. - if st.cc != nil { - st.cc.Close() - } - } - st.ts.Close() - if st.cc != nil { - st.cc.Close() - } - log.SetOutput(os.Stderr) -} - -// greet initiates the client's HTTP/2 connection into a state where -// frames may be sent. -func (st *serverTester) greet() { - st.greetAndCheckSettings(func(Setting) error { return nil }) -} - -func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) { - st.writePreface() - st.writeInitialSettings() - st.wantSettings().ForeachSetting(checkSetting) - st.writeSettingsAck() - - // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order. 
- var gotSettingsAck bool - var gotWindowUpdate bool - - for i := 0; i < 2; i++ { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - switch f := f.(type) { - case *SettingsFrame: - if !f.Header().Flags.Has(FlagSettingsAck) { - st.t.Fatal("Settings Frame didn't have ACK set") - } - gotSettingsAck = true - - case *WindowUpdateFrame: - if f.FrameHeader.StreamID != 0 { - st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID) - } - incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize) - if f.Increment != incr { - st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) - } - gotWindowUpdate = true - - default: - st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f) - } - } - - if !gotSettingsAck { - st.t.Fatalf("Didn't get a settings ACK") - } - if !gotWindowUpdate { - st.t.Fatalf("Didn't get a window update") - } -} - -func (st *serverTester) writePreface() { - n, err := st.cc.Write(clientPreface) - if err != nil { - st.t.Fatalf("Error writing client preface: %v", err) - } - if n != len(clientPreface) { - st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface)) - } -} - -func (st *serverTester) writeInitialSettings() { - if err := st.fr.WriteSettings(); err != nil { - st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) - } -} - -func (st *serverTester) writeSettingsAck() { - if err := st.fr.WriteSettingsAck(); err != nil { - st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) - } -} - -func (st *serverTester) writeHeaders(p HeadersFrameParam) { - if err := st.fr.WriteHeaders(p); err != nil { - st.t.Fatalf("Error writing HEADERS: %v", err) - } -} - -func (st *serverTester) writePriority(id uint32, p PriorityParam) { - if err := st.fr.WritePriority(id, p); err != nil { - st.t.Fatalf("Error writing PRIORITY: %v", err) - } -} - -func (st *serverTester) encodeHeaderField(k, v string) { - err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) - if err != nil { - st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) - } -} - -// encodeHeaderRaw is the magic-free version of encodeHeader. -// It takes 0 or more (k, v) pairs and encodes them. -func (st *serverTester) encodeHeaderRaw(headers ...string) []byte { - if len(headers)%2 == 1 { - panic("odd number of kv args") - } - st.headerBuf.Reset() - for len(headers) > 0 { - k, v := headers[0], headers[1] - st.encodeHeaderField(k, v) - headers = headers[2:] - } - return st.headerBuf.Bytes() -} - -// encodeHeader encodes headers and returns their HPACK bytes. headers -// must contain an even number of key/value pairs. There may be -// multiple pairs for keys (e.g. "cookie"). The :method, :path, and -// :scheme headers default to GET, / and https. The :authority header -// defaults to st.ts.Listener.Addr(). -func (st *serverTester) encodeHeader(headers ...string) []byte { - if len(headers)%2 == 1 { - panic("odd number of kv args") - } - - st.headerBuf.Reset() - defaultAuthority := st.ts.Listener.Addr().String() - - if len(headers) == 0 { - // Fast path, mostly for benchmarks, so test code doesn't pollute - // profiles when we're looking to improve server allocations. - st.encodeHeaderField(":method", "GET") - st.encodeHeaderField(":scheme", "https") - st.encodeHeaderField(":authority", defaultAuthority) - st.encodeHeaderField(":path", "/") - return st.headerBuf.Bytes() - } - - if len(headers) == 2 && headers[0] == ":method" { - // Another fast path for benchmarks. 
- st.encodeHeaderField(":method", headers[1]) - st.encodeHeaderField(":scheme", "https") - st.encodeHeaderField(":authority", defaultAuthority) - st.encodeHeaderField(":path", "/") - return st.headerBuf.Bytes() - } - - pseudoCount := map[string]int{} - keys := []string{":method", ":scheme", ":authority", ":path"} - vals := map[string][]string{ - ":method": {"GET"}, - ":scheme": {"https"}, - ":authority": {defaultAuthority}, - ":path": {"/"}, - } - for len(headers) > 0 { - k, v := headers[0], headers[1] - headers = headers[2:] - if _, ok := vals[k]; !ok { - keys = append(keys, k) - } - if strings.HasPrefix(k, ":") { - pseudoCount[k]++ - if pseudoCount[k] == 1 { - vals[k] = []string{v} - } else { - // Allows testing of invalid headers w/ dup pseudo fields. - vals[k] = append(vals[k], v) - } - } else { - vals[k] = append(vals[k], v) - } - } - for _, k := range keys { - for _, v := range vals[k] { - st.encodeHeaderField(k, v) - } - } - return st.headerBuf.Bytes() -} - -// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set. -func (st *serverTester) bodylessReq1(headers ...string) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(headers...), - EndStream: true, - EndHeaders: true, - }) -} - -func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { - if err := st.fr.WriteData(streamID, endStream, data); err != nil { - st.t.Fatalf("Error writing DATA: %v", err) - } -} - -func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { - if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil { - st.t.Fatalf("Error writing DATA: %v", err) - } -} - -func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) { - ch := make(chan interface{}, 1) - go func() { - fr, err := fr.ReadFrame() - if err != nil { - ch <- err - } else { - ch <- fr - } - }() - t := time.NewTimer(wait) - select { - case v := <-ch: - t.Stop() - if fr, ok := v.(Frame); ok { - return fr, nil - } - return nil, v.(error) - case <-t.C: - return nil, errors.New("timeout waiting for frame") - } -} - -func (st *serverTester) readFrame() (Frame, error) { - return readFrameTimeout(st.fr, 2*time.Second) -} - -func (st *serverTester) wantHeaders() *HeadersFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) - } - hf, ok := f.(*HeadersFrame) - if !ok { - st.t.Fatalf("got a %T; want *HeadersFrame", f) - } - return hf -} - -func (st *serverTester) wantContinuation() *ContinuationFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) - } - cf, ok := f.(*ContinuationFrame) - if !ok { - st.t.Fatalf("got a %T; want *ContinuationFrame", f) - } - return cf -} - -func (st *serverTester) wantData() *DataFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a DATA frame: %v", err) - } - df, ok := f.(*DataFrame) - if !ok { - st.t.Fatalf("got a %T; want *DataFrame", f) - } - return df -} - -func (st *serverTester) wantSettings() *SettingsFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) - } - sf, ok := f.(*SettingsFrame) - if !ok { - st.t.Fatalf("got a %T; want *SettingsFrame", f) - } - return sf -} - -func (st *serverTester) wantPing() *PingFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a PING frame: 
%v", err) - } - pf, ok := f.(*PingFrame) - if !ok { - st.t.Fatalf("got a %T; want *PingFrame", f) - } - return pf -} - -func (st *serverTester) wantGoAway() *GoAwayFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) - } - gf, ok := f.(*GoAwayFrame) - if !ok { - st.t.Fatalf("got a %T; want *GoAwayFrame", f) - } - return gf -} - -func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) - } - rs, ok := f.(*RSTStreamFrame) - if !ok { - st.t.Fatalf("got a %T; want *RSTStreamFrame", f) - } - if rs.FrameHeader.StreamID != streamID { - st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) - } - if rs.ErrCode != errCode { - st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) - } -} - -func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) - } - wu, ok := f.(*WindowUpdateFrame) - if !ok { - st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) - } - if wu.FrameHeader.StreamID != streamID { - st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID) - } - if wu.Increment != incr { - st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) - } -} - -func (st *serverTester) wantSettingsAck() { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - sf, ok := f.(*SettingsFrame) - if !ok { - st.t.Fatalf("Wanting a settings ACK, received a %T", f) - } - if !sf.Header().Flags.Has(FlagSettingsAck) { - st.t.Fatal("Settings Frame didn't have ACK set") - } -} - -func (st *serverTester) wantPushPromise() *PushPromiseFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - ppf, ok := f.(*PushPromiseFrame) - if !ok { - st.t.Fatalf("Wanted PushPromise, received %T", ppf) - } - return ppf -} - -func TestServer(t *testing.T) { - gotReq := make(chan bool, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Foo", "Bar") - gotReq <- true - }) - defer st.Close() - - covers("3.5", ` - The server connection preface consists of a potentially empty - SETTINGS frame ([SETTINGS]) that MUST be the first frame the - server sends in the HTTP/2 connection. 
- `) - - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(), - EndStream: true, // no DATA frames - EndHeaders: true, - }) - - select { - case <-gotReq: - case <-time.After(2 * time.Second): - t.Error("timeout waiting for request") - } -} - -func TestServer_Request_Get(t *testing.T) { - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader("foo-bar", "some-value"), - EndStream: true, // no DATA frames - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Method != "GET" { - t.Errorf("Method = %q; want GET", r.Method) - } - if r.URL.Path != "/" { - t.Errorf("URL.Path = %q; want /", r.URL.Path) - } - if r.ContentLength != 0 { - t.Errorf("ContentLength = %v; want 0", r.ContentLength) - } - if r.Close { - t.Error("Close = true; want false") - } - if !strings.Contains(r.RemoteAddr, ":") { - t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) - } - if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { - t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) - } - wantHeader := http.Header{ - "Foo-Bar": []string{"some-value"}, - } - if !reflect.DeepEqual(r.Header, wantHeader) { - t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) - } - if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { - t.Errorf("Read = %d, %v; want 0, EOF", n, err) - } - }) -} - -func TestServer_Request_Get_PathSlashes(t *testing.T) { - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":path", "/%2f/"), - EndStream: true, // no DATA frames - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.RequestURI != "/%2f/" { - t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) - } - if r.URL.Path != "///" { - t.Errorf("URL.Path = %q; want ///", r.URL.Path) - } - }) -} - -// TODO: add a test with EndStream=true on the HEADERS but setting a -// Content-Length anyway. Should we just omit it and force it to -// zero? - -func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: true, - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Method != "POST" { - t.Errorf("Method = %q; want POST", r.Method) - } - if r.ContentLength != 0 { - t.Errorf("ContentLength = %v; want 0", r.ContentLength) - } - if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { - t.Errorf("Read = %d, %v; want 0, EOF", n, err) - } - }) -} - -func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { - testBodyContents(t, -1, "", func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, nil) // just kidding. empty body. 
- }) -} - -func TestServer_Request_Post_Body_OneData(t *testing.T) { - const content = "Some content" - testBodyContents(t, -1, content, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte(content)) - }) -} - -func TestServer_Request_Post_Body_TwoData(t *testing.T) { - const content = "Some content" - testBodyContents(t, -1, content, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, false, []byte(content[:5])) - st.writeData(1, true, []byte(content[5:])) - }) -} - -func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { - const content = "Some content" - testBodyContents(t, int64(len(content)), content, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader( - ":method", "POST", - "content-length", strconv.Itoa(len(content)), - ), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte(content)) - }) -} - -func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { - testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", - func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader( - ":method", "POST", - "content-length", "3", - ), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte("12")) - }) -} - -func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { - testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", - func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader( - ":method", "POST", - "content-length", "4", - ), - EndStream: false, // to say DATA frames are coming - EndHeaders: true, - }) - st.writeData(1, true, []byte("12345")) - }) -} - -func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { - testServerRequest(t, write, func(r *http.Request) { - if r.Method != "POST" { - t.Errorf("Method = %q; want POST", r.Method) - } - if r.ContentLength != wantContentLength { - t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) - } - all, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - if string(all) != wantBody { - t.Errorf("Read = %q; want %q", all, wantBody) - } - if err := r.Body.Close(); err != nil { - t.Fatalf("Close: %v", err) - } - }) -} - -func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { - testServerRequest(t, write, func(r *http.Request) { - if r.Method != "POST" { - t.Errorf("Method = %q; want POST", r.Method) - } - if r.ContentLength != wantContentLength { - t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) - } - all, err := ioutil.ReadAll(r.Body) - if err == nil { - t.Fatalf("expected an error (%q) reading from the body. 
Successfully read %q instead.", - wantReadError, all) - } - if !strings.Contains(err.Error(), wantReadError) { - t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) - } - if err := r.Body.Close(); err != nil { - t.Fatalf("Close: %v", err) - } - }) -} - -// Using a Host header, instead of :authority -func TestServer_Request_Get_Host(t *testing.T) { - const host = "example.com" - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":authority", "", "host", host), - EndStream: true, - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Host != host { - t.Errorf("Host = %q; want %q", r.Host, host) - } - }) -} - -// Using an :authority pseudo-header, instead of Host -func TestServer_Request_Get_Authority(t *testing.T) { - const host = "example.com" - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":authority", host), - EndStream: true, - EndHeaders: true, - }) - }, func(r *http.Request) { - if r.Host != host { - t.Errorf("Host = %q; want %q", r.Host, host) - } - }) -} - -func TestServer_Request_WithContinuation(t *testing.T) { - wantHeader := http.Header{ - "Foo-One": []string{"value-one"}, - "Foo-Two": []string{"value-two"}, - "Foo-Three": []string{"value-three"}, - } - testServerRequest(t, func(st *serverTester) { - fullHeaders := st.encodeHeader( - "foo-one", "value-one", - "foo-two", "value-two", - "foo-three", "value-three", - ) - remain := fullHeaders - chunks := 0 - for len(remain) > 0 { - const maxChunkSize = 5 - chunk := remain - if len(chunk) > maxChunkSize { - chunk = chunk[:maxChunkSize] - } - remain = remain[len(chunk):] - - if chunks == 0 { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: chunk, - EndStream: true, // no DATA frames - EndHeaders: false, // we'll have continuation frames - }) - } else { - err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) - if err != nil { - t.Fatal(err) - } - } - chunks++ - } - if chunks < 2 { - t.Fatal("too few chunks") - } - }, func(r *http.Request) { - if !reflect.DeepEqual(r.Header, wantHeader) { - t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) - } - }) -} - -// Concatenated cookie headers. 
("8.1.2.5 Compressing the Cookie Header Field") -func TestServer_Request_CookieConcat(t *testing.T) { - const host = "example.com" - testServerRequest(t, func(st *serverTester) { - st.bodylessReq1( - ":authority", host, - "cookie", "a=b", - "cookie", "c=d", - "cookie", "e=f", - ) - }, func(r *http.Request) { - const want = "a=b; c=d; e=f" - if got := r.Header.Get("Cookie"); got != want { - t.Errorf("Cookie = %q; want %q", got, want) - } - }) -} - -func TestServer_Request_Reject_CapitalHeader(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) -} - -func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") }) -} - -func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") }) -} - -func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") }) -} - -func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") }) -} - -func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") }) -} - -func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") }) -} - -func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) -} - -func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid value" ... - testRejectRequest(t, func(st *serverTester) { - st.addLogFilter("duplicate pseudo-header") - st.bodylessReq1(":method", "GET", ":method", "POST") - }) -} - -func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { - // 8.1.2.3 Request Pseudo-Header Fields - // "All pseudo-header fields MUST appear in the header block - // before regular header fields. Any request or response that - // contains a pseudo-header field that appears in a header - // block after a regular header field MUST be treated as - // malformed (Section 8.1.2.6)." 
- testRejectRequest(t, func(st *serverTester) { - st.addLogFilter("pseudo-header after regular header") - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) - enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) - enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) - enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: buf.Bytes(), - EndStream: true, - EndHeaders: true, - }) - }) -} - -func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) -} - -func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) -} - -func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) -} - -func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { - testRejectRequest(t, func(st *serverTester) { - st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) - st.bodylessReq1(":unknown_thing", "") - }) -} - -func testRejectRequest(t *testing.T, send func(*serverTester)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - t.Error("server request made it to handler; should've been rejected") - }) - defer st.Close() - - st.greet() - send(st) - st.wantRSTStream(1, ErrCodeProtocol) -} - -func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - t.Error("server request made it to handler; should've been rejected") - }, optQuiet) - defer st.Close() - - st.greet() - send(st) - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeProtocol { - t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol) - } -} - -// Section 5.1, on idle connections: "Receiving any frame other than -// HEADERS or PRIORITY on a stream in this state MUST be treated as a -// connection error (Section 5.4.1) of type PROTOCOL_ERROR." 
-func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) { - testRejectRequestWithProtocolError(t, func(st *serverTester) { - st.fr.WriteWindowUpdate(123, 456) - }) -} -func TestRejectFrameOnIdle_Data(t *testing.T) { - testRejectRequestWithProtocolError(t, func(st *serverTester) { - st.fr.WriteData(123, true, nil) - }) -} -func TestRejectFrameOnIdle_RSTStream(t *testing.T) { - testRejectRequestWithProtocolError(t, func(st *serverTester) { - st.fr.WriteRSTStream(123, ErrCodeCancel) - }) -} - -func TestServer_Request_Connect(t *testing.T) { - testServerRequest(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeaderRaw( - ":method", "CONNECT", - ":authority", "example.com:123", - ), - EndStream: true, - EndHeaders: true, - }) - }, func(r *http.Request) { - if g, w := r.Method, "CONNECT"; g != w { - t.Errorf("Method = %q; want %q", g, w) - } - if g, w := r.RequestURI, "example.com:123"; g != w { - t.Errorf("RequestURI = %q; want %q", g, w) - } - if g, w := r.URL.Host, "example.com:123"; g != w { - t.Errorf("URL.Host = %q; want %q", g, w) - } - }) -} - -func TestServer_Request_Connect_InvalidPath(t *testing.T) { - testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeaderRaw( - ":method", "CONNECT", - ":authority", "example.com:123", - ":path", "/bogus", - ), - EndStream: true, - EndHeaders: true, - }) - }) -} - -func TestServer_Request_Connect_InvalidScheme(t *testing.T) { - testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeaderRaw( - ":method", "CONNECT", - ":authority", "example.com:123", - ":scheme", "https", - ), - EndStream: true, - EndHeaders: true, - }) - }) -} - -func TestServer_Ping(t *testing.T) { - st := newServerTester(t, nil) - defer st.Close() - st.greet() - - // Server should ignore this one, since it has ACK set. - ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128} - if err := st.fr.WritePing(true, ackPingData); err != nil { - t.Fatal(err) - } - - // But the server should reply to this one, since ACK is false. - pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} - if err := st.fr.WritePing(false, pingData); err != nil { - t.Fatal(err) - } - - pf := st.wantPing() - if !pf.Flags.Has(FlagPingAck) { - t.Error("response ping doesn't have ACK set") - } - if pf.Data != pingData { - t.Errorf("response ping has data %q; want %q", pf.Data, pingData) - } -} - -func TestServer_RejectsLargeFrames(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("see golang.org/issue/13434") - } - - st := newServerTester(t, nil) - defer st.Close() - st.greet() - - // Write too large of a frame (too large by one byte) - // We ignore the return value because it's expected that the server - // will only read the first 9 bytes (the headre) and then disconnect. - st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1)) - - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeFrameSize { - t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize) - } - if st.serverLogBuf.Len() != 0 { - // Previously we spun here for a bit until the GOAWAY disconnect - // timer fired, logging while we fired. 
- t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes()) - } -} - -func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { - puppet := newHandlerPuppet() - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - puppet.act(w, r) - }) - defer st.Close() - defer puppet.done() - - st.greet() - - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // data coming - EndHeaders: true, - }) - st.writeData(1, false, []byte("abcdef")) - puppet.do(readBodyHandler(t, "abc")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(1, 3) - - puppet.do(readBodyHandler(t, "def")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(1, 3) - - st.writeData(1, true, []byte("ghijkl")) // END_STREAM here - puppet.do(readBodyHandler(t, "ghi")) - puppet.do(readBodyHandler(t, "jkl")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM -} - -// the version of the TestServer_Handler_Sends_WindowUpdate with padding. -// See golang.org/issue/16556 -func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { - puppet := newHandlerPuppet() - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - puppet.act(w, r) - }) - defer st.Close() - defer puppet.done() - - st.greet() - - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, - EndHeaders: true, - }) - st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0}) - - // Expect to immediately get our 5 bytes of padding back for - // both the connection and stream (4 bytes of padding + 1 byte of length) - st.wantWindowUpdate(0, 5) - st.wantWindowUpdate(1, 5) - - puppet.do(readBodyHandler(t, "abc")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(1, 3) - - puppet.do(readBodyHandler(t, "def")) - st.wantWindowUpdate(0, 3) - st.wantWindowUpdate(1, 3) -} - -func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { - st := newServerTester(t, nil) - defer st.Close() - st.greet() - if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { - t.Fatal(err) - } - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeFlowControl { - t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) - } - if gf.LastStreamID != 0 { - t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) - } -} - -func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { - inHandler := make(chan bool) - blockHandler := make(chan bool) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - <-blockHandler - }) - defer st.Close() - defer close(blockHandler) - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // keep it open - EndHeaders: true, - }) - <-inHandler - // Send a bogus window update: - if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil { - t.Fatal(err) - } - st.wantRSTStream(1, ErrCodeFlowControl) -} - -// testServerPostUnblock sends a hanging POST with unsent data to handler, -// then runs fn once in the handler, and verifies that the error returned from -// handler is acceptable. It fails if takes over 5 seconds for handler to exit. 
-func testServerPostUnblock(t *testing.T, - handler func(http.ResponseWriter, *http.Request) error, - fn func(*serverTester), - checkErr func(error), - otherHeaders ...string) { - inHandler := make(chan bool) - errc := make(chan error, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - errc <- handler(w, r) - }) - defer st.Close() - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), - EndStream: false, // keep it open - EndHeaders: true, - }) - <-inHandler - fn(st) - select { - case err := <-errc: - if checkErr != nil { - checkErr(err) - } - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for Handler to return") - } -} - -func TestServer_RSTStream_Unblocks_Read(t *testing.T) { - testServerPostUnblock(t, - func(w http.ResponseWriter, r *http.Request) (err error) { - _, err = r.Body.Read(make([]byte, 1)) - return - }, - func(st *serverTester) { - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - }, - func(err error) { - want := StreamError{StreamID: 0x1, Code: 0x8} - if !reflect.DeepEqual(err, want) { - t.Errorf("Read error = %v; want %v", err, want) - } - }, - ) -} - -func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) { - // Run this test a bunch, because it doesn't always - // deadlock. But with a bunch, it did. - n := 50 - if testing.Short() { - n = 5 - } - for i := 0; i < n; i++ { - testServer_RSTStream_Unblocks_Header_Write(t) - } -} - -func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) { - inHandler := make(chan bool, 1) - unblockHandler := make(chan bool, 1) - headerWritten := make(chan bool, 1) - wroteRST := make(chan bool, 1) - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - <-wroteRST - w.Header().Set("foo", "bar") - w.WriteHeader(200) - w.(http.Flusher).Flush() - headerWritten <- true - <-unblockHandler - }) - defer st.Close() - - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // keep it open - EndHeaders: true, - }) - <-inHandler - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - wroteRST <- true - st.awaitIdle() - select { - case <-headerWritten: - case <-time.After(2 * time.Second): - t.Error("timeout waiting for header write") - } - unblockHandler <- true -} - -func TestServer_DeadConn_Unblocks_Read(t *testing.T) { - testServerPostUnblock(t, - func(w http.ResponseWriter, r *http.Request) (err error) { - _, err = r.Body.Read(make([]byte, 1)) - return - }, - func(st *serverTester) { st.cc.Close() }, - func(err error) { - if err == nil { - t.Error("unexpected nil error from Request.Body.Read") - } - }, - ) -} - -var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { - <-w.(http.CloseNotifier).CloseNotify() - return nil -} - -func TestServer_CloseNotify_After_RSTStream(t *testing.T) { - testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - }, nil) -} - -func TestServer_CloseNotify_After_ConnClose(t *testing.T) { - testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) -} - -// that CloseNotify unblocks after a stream error due to the client's -// problem that's unrelated to them explicitly canceling it (which is -// 
TestServer_CloseNotify_After_RSTStream above) -func TestServer_CloseNotify_After_StreamError(t *testing.T) { - testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { - // data longer than declared Content-Length => stream error - st.writeData(1, true, []byte("1234")) - }, nil, "content-length", "3") -} - -func TestServer_StateTransitions(t *testing.T) { - var st *serverTester - inHandler := make(chan bool) - writeData := make(chan bool) - leaveHandler := make(chan bool) - st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - if st.stream(1) == nil { - t.Errorf("nil stream 1 in handler") - } - if got, want := st.streamState(1), stateOpen; got != want { - t.Errorf("in handler, state is %v; want %v", got, want) - } - writeData <- true - if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { - t.Errorf("body read = %d, %v; want 0, EOF", n, err) - } - if got, want := st.streamState(1), stateHalfClosedRemote; got != want { - t.Errorf("in handler, state is %v; want %v", got, want) - } - - <-leaveHandler - }) - st.greet() - if st.stream(1) != nil { - t.Fatal("stream 1 should be empty") - } - if got := st.streamState(1); got != stateIdle { - t.Fatalf("stream 1 should be idle; got %v", got) - } - - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, // keep it open - EndHeaders: true, - }) - <-inHandler - <-writeData - st.writeData(1, true, nil) - - leaveHandler <- true - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("expected END_STREAM flag") - } - - if got, want := st.streamState(1), stateClosed; got != want { - t.Errorf("at end, state is %v; want %v", got, want) - } - if st.stream(1) != nil { - t.Fatal("at end, stream 1 should be gone") - } -} - -// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) -func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - st.writeHeaders(HeadersFrameParam{ // Not a continuation. - StreamID: 3, // different stream. 
- BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - }) -} - -// test HEADERS w/o EndHeaders + PING (should get rejected) -func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - if err := st.fr.WritePing(false, [8]byte{}); err != nil { - t.Fatal(err) - } - }) -} - -// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) -func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - st.wantHeaders() - if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { - t.Fatal(err) - } - }) -} - -// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID -func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { - t.Fatal(err) - } - }) -} - -// No HEADERS on stream 0. -func TestServer_Rejects_Headers0(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - st.writeHeaders(HeadersFrameParam{ - StreamID: 0, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - }) -} - -// No CONTINUATION on stream 0. -func TestServer_Rejects_Continuation0(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { - t.Fatal(err) - } - }) -} - -// No PRIORITY on stream 0. -func TestServer_Rejects_Priority0(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - st.writePriority(0, PriorityParam{StreamDep: 1}) - }) -} - -// No HEADERS frame with a self-dependence. -func TestServer_Rejects_HeadersSelfDependence(t *testing.T) { - testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - Priority: PriorityParam{StreamDep: 1}, - }) - }) -} - -// No PRIORTY frame with a self-dependence. -func TestServer_Rejects_PrioritySelfDependence(t *testing.T) { - testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - st.writePriority(1, PriorityParam{StreamDep: 1}) - }) -} - -func TestServer_Rejects_PushPromise(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - pp := PushPromiseParam{ - StreamID: 1, - PromiseID: 3, - } - if err := st.fr.WritePushPromise(pp); err != nil { - t.Fatal(err) - } - }) -} - -// testServerRejectsConn tests that the server hangs up with a GOAWAY -// frame and a server close after the client does something -// deserving a CONNECTION_ERROR. 
-func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) - st.addLogFilter("connection error: PROTOCOL_ERROR") - defer st.Close() - st.greet() - writeReq(st) - - st.wantGoAway() - errc := make(chan error, 1) - go func() { - fr, err := st.fr.ReadFrame() - if err == nil { - err = fmt.Errorf("got frame of type %T", fr) - } - errc <- err - }() - select { - case err := <-errc: - if err != io.EOF { - t.Errorf("ReadFrame = %v; want io.EOF", err) - } - case <-time.After(2 * time.Second): - t.Error("timeout waiting for disconnect") - } -} - -// testServerRejectsStream tests that the server sends a RST_STREAM with the provided -// error code after a client sends a bogus request. -func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) - defer st.Close() - st.greet() - writeReq(st) - st.wantRSTStream(1, code) -} - -// testServerRequest sets up an idle HTTP/2 connection and lets you -// write a single request with writeReq, and then verify that the -// *http.Request is built correctly in checkReq. -func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) { - gotReq := make(chan bool, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - if r.Body == nil { - t.Fatal("nil Body") - } - checkReq(r) - gotReq <- true - }) - defer st.Close() - - st.greet() - writeReq(st) - - select { - case <-gotReq: - case <-time.After(2 * time.Second): - t.Error("timeout waiting for request") - } -} - -func getSlash(st *serverTester) { st.bodylessReq1() } - -func TestServer_Response_NoData(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - // Nothing. - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("want END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - }) -} - -func TestServer_Response_NoData_Header_FooBar(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Foo-Bar", "some-value") - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("want END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo-bar", "some-value"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", "0"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { - const msg = "this is HTML." 
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Content-Type", "foo/bar") - io.WriteString(w, msg) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("don't want END_STREAM, expecting data") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "foo/bar"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - df := st.wantData() - if !df.StreamEnded() { - t.Error("expected DATA to have END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } - }) -} - -func TestServer_Response_TransferEncoding_chunked(t *testing.T) { - const msg = "hi" - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Transfer-Encoding", "chunked") // should be stripped - io.WriteString(w, msg) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -// Header accessed only after the initial write. -func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) { - const msg = "this is HTML." - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - io.WriteString(w, msg) - w.Header().Set("foo", "should be ignored") - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -// Header accessed before the initial write and later mutated. -func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) { - const msg = "this is HTML." - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("foo", "proper value") - io.WriteString(w, msg) - w.Header().Set("foo", "should be ignored") - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo", "proper value"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - }) -} - -func TestServer_Response_Data_SniffLenType(t *testing.T) { - const msg = "this is HTML." 
- testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - io.WriteString(w, msg) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("don't want END_STREAM, expecting data") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - df := st.wantData() - if !df.StreamEnded() { - t.Error("expected DATA to have END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } - }) -} - -func TestServer_Response_Header_Flush_MidWrite(t *testing.T) { - const msg = "this is HTML" - const msg2 = ", and this is the next chunk" - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - io.WriteString(w, msg) - w.(http.Flusher).Flush() - io.WriteString(w, msg2) - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, // sniffed - // and no content-length - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - { - df := st.wantData() - if df.StreamEnded() { - t.Error("unexpected END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } - } - { - df := st.wantData() - if !df.StreamEnded() { - t.Error("wanted END_STREAM flag on last data chunk") - } - if got := string(df.Data()); got != msg2 { - t.Errorf("got DATA %q; want %q", got, msg2) - } - } - }) -} - -func TestServer_Response_LargeWrite(t *testing.T) { - const size = 1 << 20 - const maxFrameSize = 16 << 10 - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - n, err := w.Write(bytes.Repeat([]byte("a"), size)) - if err != nil { - return fmt.Errorf("Write error: %v", err) - } - if n != size { - return fmt.Errorf("wrong size %d from Write", n) - } - return nil - }, func(st *serverTester) { - if err := st.fr.WriteSettings( - Setting{SettingInitialWindowSize, 0}, - Setting{SettingMaxFrameSize, maxFrameSize}, - ); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - - // Give the handler quota to write: - if err := st.fr.WriteWindowUpdate(1, size); err != nil { - t.Fatal(err) - } - // Give the handler quota to write to connection-level - // window as well - if err := st.fr.WriteWindowUpdate(0, size); err != nil { - t.Fatal(err) - } - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, // sniffed - // and no content-length - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - var bytes, frames int - for { - df := st.wantData() - bytes += len(df.Data()) - frames++ - for _, b := range df.Data() { - if b != 'a' { - t.Fatal("non-'a' byte seen in 
DATA") - } - } - if df.StreamEnded() { - break - } - } - if bytes != size { - t.Errorf("Got %d bytes; want %d", bytes, size) - } - if want := int(size / maxFrameSize); frames < want || frames > want*2 { - t.Errorf("Got %d frames; want %d", frames, size) - } - }) -} - -// Test that the handler can't write more than the client allows -func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { - // Make these reads. Before each read, the client adds exactly enough - // flow-control to satisfy the read. Numbers chosen arbitrarily. - reads := []int{123, 1, 13, 127} - size := 0 - for _, n := range reads { - size += n - } - - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.(http.Flusher).Flush() - n, err := w.Write(bytes.Repeat([]byte("a"), size)) - if err != nil { - return fmt.Errorf("Write error: %v", err) - } - if n != size { - return fmt.Errorf("wrong size %d from Write", n) - } - return nil - }, func(st *serverTester) { - // Set the window size to something explicit for this test. - // It's also how much initial data we expect. - if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - - df := st.wantData() - if got := len(df.Data()); got != reads[0] { - t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got) - } - - for _, quota := range reads[1:] { - if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { - t.Fatal(err) - } - df := st.wantData() - if int(quota) != len(df.Data()) { - t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) - } - } - }) -} - -// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM. 
-func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { - const size = 1 << 20 - const maxFrameSize = 16 << 10 - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.(http.Flusher).Flush() - errc := make(chan error, 1) - go func() { - _, err := w.Write(bytes.Repeat([]byte("a"), size)) - errc <- err - }() - select { - case err := <-errc: - if err == nil { - return errors.New("unexpected nil error from Write in handler") - } - return nil - case <-time.After(2 * time.Second): - return errors.New("timeout waiting for Write in handler") - } - }, func(st *serverTester) { - if err := st.fr.WriteSettings( - Setting{SettingInitialWindowSize, 0}, - Setting{SettingMaxFrameSize, maxFrameSize}, - ); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - - if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { - t.Fatal(err) - } - }) -} - -func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.(http.Flusher).Flush() - // Nothing; send empty DATA - return nil - }, func(st *serverTester) { - // Handler gets no data quota: - if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { - t.Fatal(err) - } - st.wantSettingsAck() - - getSlash(st) // make the single request - - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - - df := st.wantData() - if got := len(df.Data()); got != 0 { - t.Fatalf("unexpected %d DATA bytes; want 0", got) - } - if !df.StreamEnded() { - t.Fatal("DATA didn't have END_STREAM") - } - }) -} - -func TestServer_Response_Automatic100Continue(t *testing.T) { - const msg = "foo" - const reply = "bar" - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - if v := r.Header.Get("Expect"); v != "" { - t.Errorf("Expect header = %q; want empty", v) - } - buf := make([]byte, len(msg)) - // This read should trigger the 100-continue being sent. - if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { - return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) - } - _, err := io.WriteString(w, reply) - return err - }, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), - EndStream: false, - EndHeaders: true, - }) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "100"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Fatalf("Got headers %v; want %v", goth, wanth) - } - - // Okay, they sent status 100, so we can send our - // gigantic and/or sensitive "foo" payload now. 
- st.writeData(1, true, []byte(msg)) - - st.wantWindowUpdate(0, uint32(len(msg))) - - hf = st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("expected data to follow") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth = st.decodeHeader(hf.HeaderBlockFragment()) - wanth = [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", strconv.Itoa(len(reply))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - - df := st.wantData() - if string(df.Data()) != reply { - t.Errorf("Client read %q; want %q", df.Data(), reply) - } - if !df.StreamEnded() { - t.Errorf("expect data stream end") - } - }) -} - -func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { - errc := make(chan error, 1) - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - p := []byte("some data.\n") - for { - _, err := w.Write(p) - if err != nil { - errc <- err - return nil - } - } - }, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: false, - EndHeaders: true, - }) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - // Close the connection and wait for the handler to (hopefully) notice. - st.cc.Close() - select { - case <-errc: - case <-time.After(5 * time.Second): - t.Error("timeout") - } - }) -} - -func TestServer_Rejects_Too_Many_Streams(t *testing.T) { - const testPath = "/some/path" - - inHandler := make(chan uint32) - leaveHandler := make(chan bool) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - id := w.(*responseWriter).rws.stream.id - inHandler <- id - if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { - t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) - } - <-leaveHandler - }) - defer st.Close() - st.greet() - nextStreamID := uint32(1) - streamID := func() uint32 { - defer func() { nextStreamID += 2 }() - return nextStreamID - } - sendReq := func(id uint32, headers ...string) { - st.writeHeaders(HeadersFrameParam{ - StreamID: id, - BlockFragment: st.encodeHeader(headers...), - EndStream: true, - EndHeaders: true, - }) - } - for i := 0; i < defaultMaxStreams; i++ { - sendReq(streamID()) - <-inHandler - } - defer func() { - for i := 0; i < defaultMaxStreams; i++ { - leaveHandler <- true - } - }() - - // And this one should cross the limit: - // (It's also sent as a CONTINUATION, to verify we still track the decoder context, - // even if we're rejecting it) - rejectID := streamID() - headerBlock := st.encodeHeader(":path", testPath) - frag1, frag2 := headerBlock[:3], headerBlock[3:] - st.writeHeaders(HeadersFrameParam{ - StreamID: rejectID, - BlockFragment: frag1, - EndStream: true, - EndHeaders: false, // CONTINUATION coming - }) - if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { - t.Fatal(err) - } - st.wantRSTStream(rejectID, ErrCodeProtocol) - - // But let a handler finish: - leaveHandler <- true - st.wantHeaders() - - // And now another stream should be able to start: - goodID := streamID() - sendReq(goodID, ":path", testPath) - select { - case got := <-inHandler: - if got != goodID { - t.Errorf("Got stream %d; want %d", got, goodID) - } - case <-time.After(3 * time.Second): - t.Error("timeout waiting for handler") - } -} - -// So many response headers that the server needs to use CONTINUATION frames: -func 
TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - h := w.Header() - for i := 0; i < 5000; i++ { - h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i)) - } - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.HeadersEnded() { - t.Fatal("got unwanted END_HEADERS flag") - } - n := 0 - for { - n++ - cf := st.wantContinuation() - if cf.HeadersEnded() { - break - } - } - if n < 5 { - t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n) - } - }) -} - -// This previously crashed (reported by Mathieu Lonjaret as observed -// while using Camlistore) because we got a DATA frame from the client -// after the handler exited and our logic at the time was wrong, -// keeping a stream in the map in stateClosed, which tickled an -// invariant check later when we tried to remove that stream (via -// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop -// ended. -func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - // nothing - return nil - }, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: false, // DATA is coming - EndHeaders: true, - }) - hf := st.wantHeaders() - if !hf.HeadersEnded() || !hf.StreamEnded() { - t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) - } - - // Sent when the a Handler closes while a client has - // indicated it's still sending DATA: - st.wantRSTStream(1, ErrCodeNo) - - // Now the handler has ended, so it's ended its - // stream, but the client hasn't closed its side - // (stateClosedLocal). So send more data and verify - // it doesn't crash with an internal invariant panic, like - // it did before. - st.writeData(1, true, []byte("foo")) - - // Get our flow control bytes back, since the handler didn't get them. - st.wantWindowUpdate(0, uint32(len("foo"))) - - // Sent after a peer sends data anyway (admittedly the - // previous RST_STREAM might've still been in-flight), - // but they'll get the more friendly 'cancel' code - // first. - st.wantRSTStream(1, ErrCodeStreamClosed) - - // Set up a bunch of machinery to record the panic we saw - // previously. - var ( - panMu sync.Mutex - panicVal interface{} - ) - - testHookOnPanicMu.Lock() - testHookOnPanic = func(sc *serverConn, pv interface{}) bool { - panMu.Lock() - panicVal = pv - panMu.Unlock() - return true - } - testHookOnPanicMu.Unlock() - - // Now force the serve loop to end, via closing the connection. - st.cc.Close() - select { - case <-st.sc.doneServing: - // Loop has exited. 
- panMu.Lock() - got := panicVal - panMu.Unlock() - if got != nil { - t.Errorf("Got panic: %v", got) - } - case <-time.After(5 * time.Second): - t.Error("timeout") - } - }) -} - -func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } -func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } - -func testRejectTLS(t *testing.T, max uint16) { - st := newServerTester(t, nil, func(c *tls.Config) { - c.MaxVersion = max - }) - defer st.Close() - gf := st.wantGoAway() - if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { - t.Errorf("Got error code %v; want %v", got, want) - } -} - -func TestServer_Rejects_TLSBadCipher(t *testing.T) { - st := newServerTester(t, nil, func(c *tls.Config) { - // Only list bad ones: - c.CipherSuites = []uint16{ - tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, - } - }) - defer st.Close() - gf := st.wantGoAway() - if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { - t.Errorf("Got error code %v; want %v", got, want) - } -} - -func TestServer_Advertises_Common_Cipher(t *testing.T) { - const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - st := newServerTester(t, nil, func(c *tls.Config) { - // Have the client only support the one required by the spec. - c.CipherSuites = []uint16{requiredSuite} - }, func(ts *httptest.Server) { - var srv *http.Server = ts.Config - // Have the server configured with no specific cipher suites. - // This tests that Go's defaults include the required one. - srv.TLSConfig = nil - }) - defer st.Close() - st.greet() -} - -func (st *serverTester) onHeaderField(f hpack.HeaderField) { - if f.Name == "date" { - return - } - st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value}) -} - -func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { - st.decodedHeaders = nil - if _, err := st.hpackDec.Write(headerBlock); err != nil { - st.t.Fatalf("hpack decoding error: %v", err) - } - if err := st.hpackDec.Close(); err != nil { - st.t.Fatalf("hpack decoding error: %v", err) - } - return st.decodedHeaders -} - -// testServerResponse sets up an idle HTTP/2 connection. The client function should -// write a single request that must be handled by the handler. This waits up to 5s -// for client to return, then up to an additional 2s for the handler to return. 
-func testServerResponse(t testing.TB, - handler func(http.ResponseWriter, *http.Request) error, - client func(*serverTester), -) { - errc := make(chan error, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - if r.Body == nil { - t.Fatal("nil Body") - } - errc <- handler(w, r) - }) - defer st.Close() - - donec := make(chan bool) - go func() { - defer close(donec) - st.greet() - client(st) - }() - - select { - case <-donec: - case <-time.After(5 * time.Second): - t.Fatal("timeout in client") - } - - select { - case err := <-errc: - if err != nil { - t.Fatalf("Error in handler: %v", err) - } - case <-time.After(2 * time.Second): - t.Fatal("timeout in handler") - } -} - -// readBodyHandler returns an http Handler func that reads len(want) -// bytes from r.Body and fails t if the contents read were not -// the value of want. -func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - buf := make([]byte, len(want)) - _, err := io.ReadFull(r.Body, buf) - if err != nil { - t.Error(err) - return - } - if string(buf) != want { - t.Errorf("read %q; want %q", buf, want) - } - } -} - -// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See: -// https://github.com/tatsuhiro-t/nghttp2/issues/140 & -// http://sourceforge.net/p/curl/bugs/1472/ -func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } -func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } - -func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { - if runtime.GOOS != "linux" { - t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") - } - if testing.Short() { - t.Skip("skipping curl test in short mode") - } - requireCurl(t) - var gotConn int32 - testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } - - const msg = "Hello from curl!\n" - ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Foo", "Bar") - w.Header().Set("Client-Proto", r.Proto) - io.WriteString(w, msg) - })) - ConfigureServer(ts.Config, &Server{ - PermitProhibitedCipherSuites: permitProhibitedCipherSuites, - }) - ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config - ts.StartTLS() - defer ts.Close() - - t.Logf("Running test server for curl to hit at: %s", ts.URL) - container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) - defer kill(container) - resc := make(chan interface{}, 1) - go func() { - res, err := dockerLogs(container) - if err != nil { - resc <- err - } else { - resc <- res - } - }() - select { - case res := <-resc: - if err, ok := res.(error); ok { - t.Fatal(err) - } - body := string(res.([]byte)) - // Search for both "key: value" and "key:value", since curl changed their format - // Our Dockerfile contains the latest version (no space), but just in case people - // didn't rebuild, check both. 
- if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") { - t.Errorf("didn't see foo: Bar header") - t.Logf("Got: %s", body) - } - if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") { - t.Errorf("didn't see client-proto: HTTP/2 header") - t.Logf("Got: %s", res) - } - if !strings.Contains(string(res.([]byte)), msg) { - t.Errorf("didn't see %q content", msg) - t.Logf("Got: %s", res) - } - case <-time.After(3 * time.Second): - t.Errorf("timeout waiting for curl") - } - - if atomic.LoadInt32(&gotConn) == 0 { - t.Error("never saw an http2 connection") - } -} - -var doh2load = flag.Bool("h2load", false, "Run h2load test") - -func TestServerWithH2Load(t *testing.T) { - if !*doh2load { - t.Skip("Skipping without --h2load flag.") - } - if runtime.GOOS != "linux" { - t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") - } - requireH2load(t) - - msg := strings.Repeat("Hello, h2load!\n", 5000) - ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, msg) - w.(http.Flusher).Flush() - io.WriteString(w, msg) - })) - ts.StartTLS() - defer ts.Close() - - cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl", - "-n100000", "-c100", "-m100", ts.URL) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - t.Fatal(err) - } -} - -// Issue 12843 -func TestServerDoS_MaxHeaderListSize(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) - defer st.Close() - - // shake hands - frameSize := defaultMaxReadFrameSize - var advHeaderListSize *uint32 - st.greetAndCheckSettings(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - if s.Val < minMaxFrameSize { - frameSize = minMaxFrameSize - } else if s.Val > maxFrameSize { - frameSize = maxFrameSize - } else { - frameSize = int(s.Val) - } - case SettingMaxHeaderListSize: - advHeaderListSize = &s.Val - } - return nil - }) - - if advHeaderListSize == nil { - t.Errorf("server didn't advertise a max header list size") - } else if *advHeaderListSize == 0 { - t.Errorf("server advertised a max header list size of 0") - } - - st.encodeHeaderField(":method", "GET") - st.encodeHeaderField(":path", "/") - st.encodeHeaderField(":scheme", "https") - cookie := strings.Repeat("*", 4058) - st.encodeHeaderField("cookie", cookie) - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.headerBuf.Bytes(), - EndStream: true, - EndHeaders: false, - }) - - // Capture the short encoding of a duplicate ~4K cookie, now - // that we've already sent it once. - st.headerBuf.Reset() - st.encodeHeaderField("cookie", cookie) - - // Now send 1MB of it. 
- const size = 1 << 20 - b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len()) - for len(b) > 0 { - chunk := b - if len(chunk) > frameSize { - chunk = chunk[:frameSize] - } - b = b[len(chunk):] - st.fr.WriteContinuation(1, len(b) == 0, chunk) - } - - h := st.wantHeaders() - if !h.HeadersEnded() { - t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) - } - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "431"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", "63"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } -} - -func TestCompressionErrorOnWrite(t *testing.T) { - const maxStrLen = 8 << 10 - var serverConfig *http.Server - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - // No response body. - }, func(ts *httptest.Server) { - serverConfig = ts.Config - serverConfig.MaxHeaderBytes = maxStrLen - }) - st.addLogFilter("connection error: COMPRESSION_ERROR") - defer st.Close() - st.greet() - - maxAllowed := st.sc.framer.maxHeaderStringLen() - - // Crank this up, now that we have a conn connected with the - // hpack.Decoder's max string length set has been initialized - // from the earlier low ~8K value. We want this higher so don't - // hit the max header list size. We only want to test hitting - // the max string size. - serverConfig.MaxHeaderBytes = 1 << 20 - - // First a request with a header that's exactly the max allowed size - // for the hpack compression. It's still too long for the header list - // size, so we'll get the 431 error, but that keeps the compression - // context still valid. - hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) - - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: hbf, - EndStream: true, - EndHeaders: true, - }) - h := st.wantHeaders() - if !h.HeadersEnded() { - t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) - } - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "431"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", "63"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } - df := st.wantData() - if !strings.Contains(string(df.Data()), "HTTP Error 431") { - t.Errorf("Unexpected data body: %q", df.Data()) - } - if !df.StreamEnded() { - t.Fatalf("expect data stream end") - } - - // And now send one that's just one byte too big. - hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1)) - st.writeHeaders(HeadersFrameParam{ - StreamID: 3, - BlockFragment: hbf, - EndStream: true, - EndHeaders: true, - }) - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeCompression { - t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) - } -} - -func TestCompressionErrorOnClose(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - // No response body. - }) - st.addLogFilter("connection error: COMPRESSION_ERROR") - defer st.Close() - st.greet() - - hbf := st.encodeHeader("foo", "bar") - hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails. 
- st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: hbf, - EndStream: true, - EndHeaders: true, - }) - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeCompression { - t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) - } -} - -// test that a server handler can read trailers from a client -func TestServerReadsTrailers(t *testing.T) { - const testBody = "some test body" - writeReq := func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"), - EndStream: false, - EndHeaders: true, - }) - st.writeData(1, false, []byte(testBody)) - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeaderRaw( - "foo", "foov", - "bar", "barv", - "baz", "bazv", - "surprise", "wasn't declared; shouldn't show up", - ), - EndStream: true, - EndHeaders: true, - }) - } - checkReq := func(r *http.Request) { - wantTrailer := http.Header{ - "Foo": nil, - "Bar": nil, - "Baz": nil, - } - if !reflect.DeepEqual(r.Trailer, wantTrailer) { - t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer) - } - slurp, err := ioutil.ReadAll(r.Body) - if string(slurp) != testBody { - t.Errorf("read body %q; want %q", slurp, testBody) - } - if err != nil { - t.Fatalf("Body slurp: %v", err) - } - wantTrailerAfter := http.Header{ - "Foo": {"foov"}, - "Bar": {"barv"}, - "Baz": {"bazv"}, - } - if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) { - t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter) - } - } - testServerRequest(t, writeReq, checkReq) -} - -// test that a server handler can send trailers -func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) } -func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) } - -func testServerWritesTrailers(t *testing.T, withFlush bool) { - // See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3 - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") - w.Header().Add("Trailer", "Server-Trailer-C") - w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered - - // Regular headers: - w.Header().Set("Foo", "Bar") - w.Header().Set("Content-Length", "5") // len("Hello") - - io.WriteString(w, "Hello") - if withFlush { - w.(http.Flusher).Flush() - } - w.Header().Set("Server-Trailer-A", "valuea") - w.Header().Set("Server-Trailer-C", "valuec") // skipping B - // After a flush, random keys like Server-Surprise shouldn't show up: - w.Header().Set("Server-Surpise", "surprise! 
this isn't predeclared!") - // But we do permit promoting keys to trailers after a - // flush if they start with the magic - // otherwise-invalid "Trailer:" prefix: - w.Header().Set("Trailer:Post-Header-Trailer", "hi1") - w.Header().Set("Trailer:post-header-trailer2", "hi2") - w.Header().Set("Trailer:Range", "invalid") - w.Header().Set("Trailer:Foo\x01Bogus", "invalid") - w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40") - w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40") - w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40") - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("response HEADERS had END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("response HEADERS didn't have END_HEADERS") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo", "Bar"}, - {"trailer", "Server-Trailer-A, Server-Trailer-B"}, - {"trailer", "Server-Trailer-C"}, - {"trailer", "Transfer-Encoding, Content-Length, Trailer"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", "5"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) - } - df := st.wantData() - if string(df.Data()) != "Hello" { - t.Fatalf("Client read %q; want Hello", df.Data()) - } - if df.StreamEnded() { - t.Fatalf("data frame had STREAM_ENDED") - } - tf := st.wantHeaders() // for the trailers - if !tf.StreamEnded() { - t.Fatalf("trailers HEADERS lacked END_STREAM") - } - if !tf.HeadersEnded() { - t.Fatalf("trailers HEADERS lacked END_HEADERS") - } - wanth = [][2]string{ - {"post-header-trailer", "hi1"}, - {"post-header-trailer2", "hi2"}, - {"server-trailer-a", "valuea"}, - {"server-trailer-c", "valuec"}, - } - goth = st.decodeHeader(tf.HeaderBlockFragment()) - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) - } - }) -} - -// validate transmitted header field names & values -// golang.org/issue/14048 -func TestServerDoesntWriteInvalidHeaders(t *testing.T) { - testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - w.Header().Add("OK1", "x") - w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key - w.Header().Add("Bad1\x00", "x") // null in key - w.Header().Add("Bad2", "x\x00y") // null in value - return nil - }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Error("response HEADERS lacked END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("response HEADERS didn't have END_HEADERS") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"ok1", "x"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", "0"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) - } - }) -} - -func BenchmarkServerGets(b *testing.B) { - defer disableGoroutineTracking()() - b.ReportAllocs() - - const msg = "Hello, world" - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, msg) - }) - defer st.Close() - st.greet() - - // Give the server quota to reply. 
(plus it has the the 64KB) - if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { - b.Fatal(err) - } - - for i := 0; i < b.N; i++ { - id := 1 + uint32(i)*2 - st.writeHeaders(HeadersFrameParam{ - StreamID: id, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - st.wantHeaders() - df := st.wantData() - if !df.StreamEnded() { - b.Fatalf("DATA didn't have END_STREAM; got %v", df) - } - } -} - -func BenchmarkServerPosts(b *testing.B) { - defer disableGoroutineTracking()() - b.ReportAllocs() - - const msg = "Hello, world" - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - // Consume the (empty) body from th peer before replying, otherwise - // the server will sometimes (depending on scheduling) send the peer a - // a RST_STREAM with the CANCEL error code. - if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { - b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) - } - io.WriteString(w, msg) - }) - defer st.Close() - st.greet() - - // Give the server quota to reply. (plus it has the the 64KB) - if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { - b.Fatal(err) - } - - for i := 0; i < b.N; i++ { - id := 1 + uint32(i)*2 - st.writeHeaders(HeadersFrameParam{ - StreamID: id, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, - EndHeaders: true, - }) - st.writeData(id, true, nil) - st.wantHeaders() - df := st.wantData() - if !df.StreamEnded() { - b.Fatalf("DATA didn't have END_STREAM; got %v", df) - } - } -} - -// Send a stream of messages from server to client in separate data frames. -// Brings up performance issues seen in long streams. -// Created to show problem in go issue #18502 -func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) { - benchmarkServerToClientStream(b) -} - -// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8 -// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer. -func BenchmarkServerToClientStreamReuseFrames(b *testing.B) { - benchmarkServerToClientStream(b, optFramerReuseFrames) -} - -func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { - defer disableGoroutineTracking()() - b.ReportAllocs() - const msgLen = 1 - // default window size - const windowSize = 1<<16 - 1 - - // next message to send from the server and for the client to expect - nextMsg := func(i int) []byte { - msg := make([]byte, msgLen) - msg[0] = byte(i) - if len(msg) != msgLen { - panic("invalid test setup msg length") - } - return msg - } - - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - // Consume the (empty) body from th peer before replying, otherwise - // the server will sometimes (depending on scheduling) send the peer a - // a RST_STREAM with the CANCEL error code. - if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { - b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) - } - for i := 0; i < b.N; i += 1 { - w.Write(nextMsg(i)) - w.(http.Flusher).Flush() - } - }, newServerOpts...) 
- defer st.Close() - st.greet() - - const id = uint32(1) - - st.writeHeaders(HeadersFrameParam{ - StreamID: id, - BlockFragment: st.encodeHeader(":method", "POST"), - EndStream: false, - EndHeaders: true, - }) - - st.writeData(id, true, nil) - st.wantHeaders() - - var pendingWindowUpdate = uint32(0) - - for i := 0; i < b.N; i += 1 { - expected := nextMsg(i) - df := st.wantData() - if bytes.Compare(expected, df.data) != 0 { - b.Fatalf("Bad message received; want %v; got %v", expected, df.data) - } - // try to send infrequent but large window updates so they don't overwhelm the test - pendingWindowUpdate += uint32(len(df.data)) - if pendingWindowUpdate >= windowSize/2 { - if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil { - b.Fatal(err) - } - if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil { - b.Fatal(err) - } - pendingWindowUpdate = 0 - } - } - df := st.wantData() - if !df.StreamEnded() { - b.Fatalf("DATA didn't have END_STREAM; got %v", df) - } -} - -// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 -// Verify we don't hang. -func TestIssue53(t *testing.T) { - const data = "PRI * HTTP/2.0\r\n\r\nSM" + - "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad" - s := &http.Server{ - ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags), - Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("hello")) - }), - } - s2 := &Server{ - MaxReadFrameSize: 1 << 16, - PermitProhibitedCipherSuites: true, - } - c := &issue53Conn{[]byte(data), false, false} - s2.ServeConn(c, &ServeConnOpts{BaseConfig: s}) - if !c.closed { - t.Fatal("connection is not closed") - } -} - -type issue53Conn struct { - data []byte - closed bool - written bool -} - -func (c *issue53Conn) Read(b []byte) (n int, err error) { - if len(c.data) == 0 { - return 0, io.EOF - } - n = copy(b, c.data) - c.data = c.data[n:] - return -} - -func (c *issue53Conn) Write(b []byte) (n int, err error) { - c.written = true - return len(b), nil -} - -func (c *issue53Conn) Close() error { - c.closed = true - return nil -} - -func (c *issue53Conn) LocalAddr() net.Addr { - return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} -} -func (c *issue53Conn) RemoteAddr() net.Addr { - return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} -} -func (c *issue53Conn) SetDeadline(t time.Time) error { return nil } -func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil } -func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil } - -// golang.org/issue/12895 -func TestConfigureServer(t *testing.T) { - tests := []struct { - name string - tlsConfig *tls.Config - wantErr string - }{ - { - name: "empty server", - }, - { - name: "just the required cipher suite", - tlsConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, - }, - }, - { - name: "missing required cipher suite", - tlsConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, - }, - wantErr: "is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - }, - { - name: "required after bad", - tlsConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, - }, - wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after", - }, - { - name: "bad after required", - tlsConfig: &tls.Config{ - CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA}, - }, - }, - 
} - for _, tt := range tests { - srv := &http.Server{TLSConfig: tt.tlsConfig} - err := ConfigureServer(srv, nil) - if (err != nil) != (tt.wantErr != "") { - if tt.wantErr != "" { - t.Errorf("%s: success, but want error", tt.name) - } else { - t.Errorf("%s: unexpected error: %v", tt.name, err) - } - } - if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { - t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr) - } - if err == nil && !srv.TLSConfig.PreferServerCipherSuites { - t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name) - } - } -} - -func TestServerRejectHeadWithBody(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - // No response body. - }) - defer st.Close() - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "HEAD"), - EndStream: false, // what we're testing, a bogus HEAD request with body - EndHeaders: true, - }) - st.wantRSTStream(1, ErrCodeProtocol) -} - -func TestServerNoAutoContentLengthOnHead(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - // No response body. (or smaller than one frame) - }) - defer st.Close() - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, // clients send odd numbers - BlockFragment: st.encodeHeader(":method", "HEAD"), - EndStream: true, - EndHeaders: true, - }) - h := st.wantHeaders() - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } -} - -// golang.org/issue/13495 -func TestServerNoDuplicateContentType(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - w.Header()["Content-Type"] = []string{""} - fmt.Fprintf(w, "hi") - }) - defer st.Close() - st.greet() - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - h := st.wantHeaders() - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "200"}, - {"content-type", ""}, - {"content-length", "41"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } -} - -func disableGoroutineTracking() (restore func()) { - old := DebugGoroutines - DebugGoroutines = false - return func() { DebugGoroutines = old } -} - -func BenchmarkServer_GetRequest(b *testing.B) { - defer disableGoroutineTracking()() - b.ReportAllocs() - const msg = "Hello, world." - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - n, err := io.Copy(ioutil.Discard, r.Body) - if err != nil || n > 0 { - b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) - } - io.WriteString(w, msg) - }) - defer st.Close() - - st.greet() - // Give the server quota to reply. 
(plus it has the the 64KB) - if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { - b.Fatal(err) - } - hbf := st.encodeHeader(":method", "GET") - for i := 0; i < b.N; i++ { - streamID := uint32(1 + 2*i) - st.writeHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: hbf, - EndStream: true, - EndHeaders: true, - }) - st.wantHeaders() - st.wantData() - } -} - -func BenchmarkServer_PostRequest(b *testing.B) { - defer disableGoroutineTracking()() - b.ReportAllocs() - const msg = "Hello, world." - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - n, err := io.Copy(ioutil.Discard, r.Body) - if err != nil || n > 0 { - b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) - } - io.WriteString(w, msg) - }) - defer st.Close() - st.greet() - // Give the server quota to reply. (plus it has the the 64KB) - if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { - b.Fatal(err) - } - hbf := st.encodeHeader(":method", "POST") - for i := 0; i < b.N; i++ { - streamID := uint32(1 + 2*i) - st.writeHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: hbf, - EndStream: false, - EndHeaders: true, - }) - st.writeData(streamID, true, nil) - st.wantHeaders() - st.wantData() - } -} - -type connStateConn struct { - net.Conn - cs tls.ConnectionState -} - -func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs } - -// golang.org/issue/12737 -- handle any net.Conn, not just -// *tls.Conn. -func TestServerHandleCustomConn(t *testing.T) { - var s Server - c1, c2 := net.Pipe() - clientDone := make(chan struct{}) - handlerDone := make(chan struct{}) - var req *http.Request - go func() { - defer close(clientDone) - defer c2.Close() - fr := NewFramer(c2, c2) - io.WriteString(c2, ClientPreface) - fr.WriteSettings() - fr.WriteSettingsAck() - f, err := fr.ReadFrame() - if err != nil { - t.Error(err) - return - } - if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() { - t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f)) - return - } - f, err = fr.ReadFrame() - if err != nil { - t.Error(err) - return - } - if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() { - t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f)) - return - } - var henc hpackEncoder - fr.WriteHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"), - EndStream: true, - EndHeaders: true, - }) - go io.Copy(ioutil.Discard, c2) - <-handlerDone - }() - const testString = "my custom ConnectionState" - fakeConnState := tls.ConnectionState{ - ServerName: testString, - Version: tls.VersionTLS12, - CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - } - go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{ - BaseConfig: &http.Server{ - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer close(handlerDone) - req = r - }), - }}) - select { - case <-clientDone: - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for handler") - } - if req.TLS == nil { - t.Fatalf("Request.TLS is nil. 
Got: %#v", req) - } - if req.TLS.ServerName != testString { - t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString) - } -} - -// golang.org/issue/14214 -func TestServer_Rejects_ConnHeaders(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - t.Error("should not get to Handler") - }) - defer st.Close() - st.greet() - st.bodylessReq1("connection", "foo") - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "400"}, - {"content-type", "text/plain; charset=utf-8"}, - {"x-content-type-options", "nosniff"}, - {"content-length", "51"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } -} - -type hpackEncoder struct { - enc *hpack.Encoder - buf bytes.Buffer -} - -func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte { - if len(headers)%2 == 1 { - panic("odd number of kv args") - } - he.buf.Reset() - if he.enc == nil { - he.enc = hpack.NewEncoder(&he.buf) - } - for len(headers) > 0 { - k, v := headers[0], headers[1] - err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v}) - if err != nil { - t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) - } - headers = headers[2:] - } - return he.buf.Bytes() -} - -func TestCheckValidHTTP2Request(t *testing.T) { - tests := []struct { - h http.Header - want error - }{ - { - h: http.Header{"Te": {"trailers"}}, - want: nil, - }, - { - h: http.Header{"Te": {"trailers", "bogus"}}, - want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), - }, - { - h: http.Header{"Foo": {""}}, - want: nil, - }, - { - h: http.Header{"Connection": {""}}, - want: errors.New(`request header "Connection" is not valid in HTTP/2`), - }, - { - h: http.Header{"Proxy-Connection": {""}}, - want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), - }, - { - h: http.Header{"Keep-Alive": {""}}, - want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), - }, - { - h: http.Header{"Upgrade": {""}}, - want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), - }, - } - for i, tt := range tests { - got := checkValidHTTP2RequestHeaders(tt.h) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) - } - } -} - -// golang.org/issue/14030 -func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { - const msg = "Hello" - const msg2 = "World" - - doRead := make(chan bool, 1) - defer close(doRead) // fallback cleanup - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, msg) - w.(http.Flusher).Flush() - - // Do a read, which might force a 100-continue status to be sent. 
- <-doRead - r.Body.Read(make([]byte, 10)) - - io.WriteString(w, msg2) - - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) - req.Header.Set("Expect", "100-continue") - - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - - buf := make([]byte, len(msg)) - if _, err := io.ReadFull(res.Body, buf); err != nil { - t.Fatal(err) - } - if string(buf) != msg { - t.Fatalf("msg = %q; want %q", buf, msg) - } - - doRead <- true - - if _, err := io.ReadFull(res.Body, buf); err != nil { - t.Fatal(err) - } - if string(buf) != msg2 { - t.Fatalf("second msg = %q; want %q", buf, msg2) - } -} - -type funcReader func([]byte) (n int, err error) - -func (f funcReader) Read(p []byte) (n int, err error) { return f(p) } - -// golang.org/issue/16481 -- return flow control when streams close with unread data. -// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport) -func TestUnreadFlowControlReturned_Server(t *testing.T) { - unblock := make(chan bool, 1) - defer close(unblock) - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - // Don't read the 16KB request body. Wait until the client's - // done sending it and then return. This should cause the Server - // to then return those 16KB of flow control to the client. - <-unblock - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - // This previously hung on the 4th iteration. - for i := 0; i < 6; i++ { - body := io.MultiReader( - io.LimitReader(neverEnding('A'), 16<<10), - funcReader(func([]byte) (n int, err error) { - unblock <- true - return 0, io.EOF - }), - ) - req, _ := http.NewRequest("POST", st.ts.URL, body) - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - } - -} - -func TestServerIdleTimeout(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - }, func(h2s *Server) { - h2s.IdleTimeout = 500 * time.Millisecond - }) - defer st.Close() - - st.greet() - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeNo { - t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) - } -} - -func TestServerIdleTimeout_AfterRequest(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - const timeout = 250 * time.Millisecond - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - time.Sleep(timeout * 2) - }, func(h2s *Server) { - h2s.IdleTimeout = timeout - }) - defer st.Close() - - st.greet() - - // Send a request which takes twice the timeout. Verifies the - // idle timeout doesn't fire while we're in a request: - st.bodylessReq1() - st.wantHeaders() - - // But the idle timeout should be rearmed after the request - // is done: - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeNo { - t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) - } -} - -// grpc-go closes the Request.Body currently with a Read. -// Verify that it doesn't race. 
-// See https://github.com/grpc/grpc-go/pull/938 -func TestRequestBodyReadCloseRace(t *testing.T) { - for i := 0; i < 100; i++ { - body := &requestBody{ - pipe: &pipe{ - b: new(bytes.Buffer), - }, - } - body.pipe.CloseWithError(io.EOF) - - done := make(chan bool, 1) - buf := make([]byte, 10) - go func() { - time.Sleep(1 * time.Millisecond) - body.Close() - done <- true - }() - body.Read(buf) - <-done - } -} - -func TestIssue20704Race(t *testing.T) { - if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { - t.Skip("skipping in short mode") - } - const ( - itemSize = 1 << 10 - itemCount = 100 - ) - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - for i := 0; i < itemCount; i++ { - _, err := w.Write(make([]byte, itemSize)) - if err != nil { - return - } - } - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - cl := &http.Client{Transport: tr} - - for i := 0; i < 1000; i++ { - resp, err := cl.Get(st.ts.URL) - if err != nil { - t.Fatal(err) - } - // Force a RST stream to the server by closing without - // reading the body: - resp.Body.Close() - } -} diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml deleted file mode 100644 index 31a84bed..00000000 --- a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml +++ /dev/null @@ -1,5021 +0,0 @@ - - - - - - - - - - - - - - - - - - - Hypertext Transfer Protocol version 2 - - - Twist -
-   [author address blocks, markup stripped: mbelshe@chromium.org; Google, Inc <fenix@google.com>; Mozilla, 331 E Evelyn Street, Mountain View, CA 94041, US <martin.thomson@gmail.com>]
-   [front-matter keywords, markup stripped: Applications, HTTPbis, HTTP, SPDY, Web]
-
-   This specification describes an optimized expression of the semantics of the Hypertext Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a reduced perception of latency by introducing header field compression and allowing multiple concurrent messages on the same connection. It also introduces unsolicited push of representations from servers to clients.
-
-   This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. HTTP's existing semantics remain unchanged.
-
-   Discussion of this draft takes place on the HTTPBIS working group mailing list (ietf-http-wg@w3.org), which is archived at .
-
-   Working Group information can be found at ; that specific to HTTP/2 are at .
-
-   The changes in this draft are summarized in .
-   The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the HTTP/1.1 message format () has several characteristics that have a negative overall effect on application performance today.
-
-   In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 clients that need to make many requests typically use multiple connections to a server in order to achieve concurrency and thereby reduce latency.
-
-   Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary network traffic, as well as causing the initial TCP congestion window to quickly fill. This can result in excessive latency when multiple requests are made on a new TCP connection.
-
-   HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an underlying connection. Specifically, it allows interleaving of request and response messages on the same connection and uses an efficient coding for HTTP header fields. It also allows prioritization of requests, letting more important requests complete more quickly, further improving performance.
-
-   The resulting protocol is more friendly to the network, because fewer TCP connections can be used in comparison to HTTP/1.x. This means less competition with other flows, and longer-lived connections, which in turn leads to better utilization of available network capacity.
-
-   Finally, HTTP/2 also enables more efficient processing of messages through use of binary message framing.
-   HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core features of HTTP/1.1, but aims to be more efficient in several ways.
-
-   The basic protocol unit in HTTP/2 is a frame. Each frame type serves a different purpose. For example, HEADERS and DATA frames form the basis of HTTP requests and responses; other frame types like SETTINGS, WINDOW_UPDATE, and PUSH_PROMISE are used in support of other HTTP/2 features.
-
-   Multiplexing of requests is achieved by having each HTTP request-response exchange associated with its own stream. Streams are largely independent of each other, so a blocked or stalled request or response does not prevent progress on other streams.
-
-   Flow control and prioritization ensure that it is possible to efficiently use multiplexed streams. Flow control helps to ensure that only data that can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed to the most important streams first.
-
-   HTTP/2 adds a new interaction mode, whereby a server can push responses to a client. Server push allows a server to speculatively send a client data that the server anticipates the client will need, trading off some network usage against a potential latency gain. The server does this by synthesizing a request, which it sends as a PUSH_PROMISE frame. The server is then able to send a response to the synthetic request on a separate stream.
-
-   Frames that contain HTTP header fields are compressed. HTTP requests can be highly redundant, so compression can reduce the size of requests and responses significantly.
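The paragraphs above describe frames, multiplexed streams, server push, and header compression in prose. As a rough, hypothetical companion on the Go side, the sketch below enables HTTP/2 on a standard net/http server through golang.org/x/net/http2 and attempts an opportunistic server push from the handler; the certificate file names and the /static/app.css push target are placeholders.

package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Server push: offer a resource before the client asks for it.
		// Over HTTP/1.1 the ResponseWriter does not implement Pusher,
		// so the type assertion quietly skips the push.
		if p, ok := w.(http.Pusher); ok {
			_ = p.Push("/static/app.css", nil) // placeholder target
		}
		io.WriteString(w, "hello over "+r.Proto+"\n")
	})

	srv := &http.Server{Addr: ":8443", Handler: mux}
	// Wire the HTTP/2 server into the standard net/http server.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholder file names for a TLS key pair;
	// ALPN then selects HTTP/2 ("h2") with capable clients.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}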
    - - The HTTP/2 specification is split into four parts: - - - Starting HTTP/2 covers how an HTTP/2 connection is - initiated. - - - The framing and streams layers describe the way HTTP/2 frames are - structured and formed into multiplexed streams. - - - Frame and error - definitions include details of the frame and error types used in HTTP/2. - - - HTTP mappings and additional - requirements describe how HTTP semantics are expressed using frames and - streams. - - - - - While some of the frame and stream layer concepts are isolated from HTTP, this - specification does not define a completely generic framing layer. The framing and streams - layers are tailored to the needs of the HTTP protocol and server push. - -
    - -
-    The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD
-    NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as
-    described in RFC 2119.
-
-    All numeric values are in network byte order. Values are unsigned unless otherwise
-    indicated. Literal values are provided in decimal or hexadecimal as appropriate.
-    Hexadecimal literals are prefixed with 0x to distinguish them from decimal literals.
-
-    The following terms are used:
-
-    client: The endpoint initiating the HTTP/2 connection.
-
-    connection: A transport-layer connection between two endpoints.
-
-    connection error: An error that affects the entire HTTP/2 connection.
-
-    endpoint: Either the client or server of the connection.
-
-    frame: The smallest unit of communication within an HTTP/2 connection, consisting of a
-    header and a variable-length sequence of octets structured according to the frame type.
-
-    peer: An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint
-    that is remote to the primary subject of discussion.
-
-    receiver: An endpoint that is receiving frames.
-
-    sender: An endpoint that is transmitting frames.
-
-    server: The endpoint which did not initiate the HTTP/2 connection.
-
-    stream: A bi-directional flow of frames across a virtual channel within the HTTP/2
-    connection.
-
-    stream error: An error on the individual HTTP/2 stream.
-
-    Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined
-    in .
-
    -
    - -
    - - An HTTP/2 connection is an application layer protocol running on top of a TCP connection - (). The client is the TCP connection initiator. - - - HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same - default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, - implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the - upstream server (the immediate peer to which the client wishes to establish a connection) - supports HTTP/2. - - - - The means by which support for HTTP/2 is determined is different for "http" and "https" - URIs. Discovery for "http" URIs is described in . Discovery - for "https" URIs is described in . - - -
    - - The protocol defined in this document has two identifiers. - - - - The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) - field and any place that HTTP/2 over TLS is identified. - - - The "h2" string is serialized into an ALPN protocol identifier as the two octet - sequence: 0x68, 0x32. - - - - - The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. - This identifier is used in the HTTP/1.1 Upgrade header field and any place that - HTTP/2 over TCP is identified. - - - - - - Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message - semantics described in this document. - - - RFC Editor's Note: please remove the remainder of this section prior to the - publication of a final version of this document. - - - Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". - Until such an RFC exists, implementations MUST NOT identify themselves using these - strings. - - - Examples and text throughout the rest of this document use "h2" as a matter of - editorial convenience only. Implementations of draft versions MUST NOT identify using - this string. - - - Implementations of draft versions of the protocol MUST add the string "-" and the - corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 - over TLS is identified using the string "h2-11". - - - Non-compatible experiments that are based on these draft versions MUST append the string - "-" and an experiment name to the identifier. For example, an experimental implementation - of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself - as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in - . Experimenters are - encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. - -
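A minimal Go sketch of how a client might offer the "h2" identifier through the TLS ALPN extension, using only the standard crypto/tls package. The host name example.com, the "http/1.1" fallback, and the overall client setup are illustrative assumptions, not requirements of this specification.

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Advertise "h2" via ALPN; fall back to HTTP/1.1 if the server
	// does not select it.
	conf := &tls.Config{
		NextProtos: []string{"h2", "http/1.1"},
		ServerName: "example.com",
	}

	conn, err := tls.Dial("tcp", "example.com:443", conf)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// NegotiatedProtocol is "h2" only if the server selected HTTP/2.
	state := conn.ConnectionState()
	fmt.Println("negotiated protocol:", state.NegotiatedProtocol)
}

If the server does not select "h2", the client simply continues with whatever protocol was negotiated.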
    - -
    - - A client that makes a request for an "http" URI without prior knowledge about support for - HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade - header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include - exactly one HTTP2-Settings header field. - -
-    For example:
-
-GET / HTTP/1.1
-Host: server.example.com
-Connection: Upgrade, HTTP2-Settings
-Upgrade: h2c
-HTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>
-
    - - Requests that contain an entity body MUST be sent in their entirety before the client can - send HTTP/2 frames. This means that a large request entity can block the use of the - connection until it is completely sent. - - - If concurrency of an initial request with subsequent requests is important, an OPTIONS - request can be used to perform the upgrade to HTTP/2, at the cost of an additional - round-trip. - - - A server that does not support HTTP/2 can respond to the request as though the Upgrade - header field were absent: - -
-
-HTTP/1.1 200 OK
-Content-Length: 243
-Content-Type: text/html
-
-...
-
    - - A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with - "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . - - - A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) - response. After the empty line that terminates the 101 response, the server can begin - sending HTTP/2 frames. These frames MUST include a response to the request that initiated - the Upgrade. - - -
-
-    For example:
-
-HTTP/1.1 101 Switching Protocols
-Connection: Upgrade
-Upgrade: h2c
-
-[ HTTP/2 connection ...
-
    - - The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a - SETTINGS frame. - - - The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is - assigned default priority values. Stream 1 is - implicitly half closed from the client toward the server, since the request is completed - as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the - response. - - -
    - - A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field - that includes parameters that govern the HTTP/2 connection, provided in anticipation of - the server accepting the request to upgrade. - -
    - -
    - - A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, - or if more than one is present. A server MUST NOT send this header field. - - - - The content of the HTTP2-Settings header field is the - payload of a SETTINGS frame (), encoded as a - base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The - ABNF production for token68 is - defined in . - - - Since the upgrade is only intended to apply to the immediate connection, a client - sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded - downstream. - - - A server decodes and interprets these values as it would any other - SETTINGS frame. Acknowledgement of the - SETTINGS parameters is not necessary, since a 101 response serves as implicit - acknowledgment. Providing these values in the Upgrade request gives a client an - opportunity to provide parameters prior to receiving any frames from the server. - -
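The following Go sketch shows one way a client could build the HTTP2-Settings header value: a SETTINGS payload (6 octets per parameter) encoded as base64url with the trailing '=' padding omitted, which corresponds to Go's base64.RawURLEncoding. The setting type and settingsPayload helper are hypothetical names used only for illustration.

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// setting is one SETTINGS parameter: a 16-bit identifier and a 32-bit value.
type setting struct {
	ID  uint16
	Val uint32
}

// settingsPayload serializes parameters into the 6-octet-per-entry
// SETTINGS payload format.
func settingsPayload(settings []setting) []byte {
	buf := make([]byte, 0, 6*len(settings))
	for _, s := range settings {
		entry := make([]byte, 6)
		binary.BigEndian.PutUint16(entry[0:2], s.ID)
		binary.BigEndian.PutUint32(entry[2:6], s.Val)
		buf = append(buf, entry...)
	}
	return buf
}

func main() {
	// 0x3 = SETTINGS_MAX_CONCURRENT_STREAMS, 0x4 = SETTINGS_INITIAL_WINDOW_SIZE.
	payload := settingsPayload([]setting{{ID: 0x3, Val: 100}, {ID: 0x4, Val: 65535}})

	// base64url with trailing '=' omitted corresponds to Go's RawURLEncoding.
	fmt.Println("HTTP2-Settings:", base64.RawURLEncoding.EncodeToString(payload))
}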
    -
    - -
    - - A client that makes a request to an "https" URI uses TLS - with the application layer protocol negotiation extension. - - - HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a - client or selected by a server. - - - Once TLS negotiation is complete, both the client and the server send a connection preface. - -
    - -
    - - A client can learn that a particular server supports HTTP/2 by other means. For example, - describes a mechanism for advertising this capability. - - - A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, - after the connection preface; a server can - identify such a connection by the presence of the connection preface. This only affects - the establishment of HTTP/2 connections over cleartext TCP; implementations that support - HTTP/2 over TLS MUST use protocol negotiation in TLS. - - - Without additional information, prior support for HTTP/2 is not a strong signal that a - given server will support HTTP/2 for future connections. For example, it is possible for - server configurations to change, for configurations to differ between instances in - clustered servers, or for network conditions to change. - -
    - -
    - - Upon establishment of a TCP connection and determination that HTTP/2 will be used by both - peers, each endpoint MUST send a connection preface as a final confirmation and to - establish the initial SETTINGS parameters for the HTTP/2 connection. The client and - server each send a different connection preface. - - - The client connection preface starts with a sequence of 24 octets, which in hex notation - are: - -
-
-0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
-
    - - (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence - is followed by a SETTINGS frame (). The - SETTINGS frame MAY be empty. The client sends the client connection - preface immediately upon receipt of a 101 Switching Protocols response (indicating a - successful upgrade), or as the first application data octets of a TLS connection. If - starting an HTTP/2 connection with prior knowledge of server support for the protocol, the - client connection preface is sent upon connection establishment. - - - - - The client connection preface is selected so that a large proportion of HTTP/1.1 or - HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note - that this does not address the concerns raised in . - - - - - The server connection preface consists of a potentially empty SETTINGS - frame () that MUST be the first frame the server sends in the - HTTP/2 connection. - - - The SETTINGS frames received from a peer as part of the connection preface - MUST be acknowledged (see ) after sending the connection - preface. - - - To avoid unnecessary latency, clients are permitted to send additional frames to the - server immediately after sending the client connection preface, without waiting to receive - the server connection preface. It is important to note, however, that the server - connection preface SETTINGS frame might include parameters that necessarily - alter how a client is expected to communicate with the server. Upon receiving the - SETTINGS frame, the client is expected to honor any parameters established. - In some configurations, it is possible for the server to transmit SETTINGS - before the client sends additional frames, providing an opportunity to avoid this issue. - - - Clients and servers MUST treat an invalid connection preface as a connection error of type - PROTOCOL_ERROR. A GOAWAY frame () - MAY be omitted in this case, since an invalid preface indicates that the peer is not using - HTTP/2. - -
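Assuming a transport connection that has already been established for HTTP/2 (for example via prior knowledge), the sketch below writes the 24-octet client connection preface followed by an empty SETTINGS frame. The sendClientPreface helper is a hypothetical illustration; a real implementation would continue with the rest of connection setup and SETTINGS acknowledgement.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// clientPreface is the fixed 24-octet sequence "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n".
const clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

// sendClientPreface writes the connection preface followed by an empty
// SETTINGS frame (length 0, type 0x4, flags 0, stream 0).
func sendClientPreface(w io.Writer) error {
	if _, err := io.WriteString(w, clientPreface); err != nil {
		return err
	}
	emptySettings := []byte{0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00}
	_, err := w.Write(emptySettings)
	return err
}

func main() {
	var buf bytes.Buffer
	if err := sendClientPreface(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("preface + empty SETTINGS: % x\n", buf.Bytes())
}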
    -
    - -
    - - Once the HTTP/2 connection is established, endpoints can begin exchanging frames. - - -
    - - All frames begin with a fixed 9-octet header followed by a variable-length payload. - -
-
- +-----------------------------------------------+
- |                 Length (24)                   |
- +---------------+---------------+---------------+
- |   Type (8)    |   Flags (8)   |
- +-+-------------+---------------+-------------------------------+
- |R|                 Stream Identifier (31)                      |
- +=+=============================================================+
- |                   Frame Payload (0...)                      ...
- +---------------------------------------------------------------+
-
-    The fields of the frame header are defined as:
-
-    Length: The length of the frame payload expressed as an unsigned 24-bit integer. Values
-    greater than 2^14 (16,384) MUST NOT be sent unless the receiver has set a larger value
-    for SETTINGS_MAX_FRAME_SIZE. The 9 octets of the frame header are not included in this
-    value.
-
-    Type: The 8-bit type of the frame. The frame type determines the format and semantics of
-    the frame. Implementations MUST ignore and discard any frame that has a type that is
-    unknown.
-
-    Flags: An 8-bit field reserved for frame-type specific boolean flags. Flags are assigned
-    semantics specific to the indicated frame type. Flags that have no defined semantics for
-    a particular frame type MUST be ignored, and MUST be left unset (0) when sending.
-
-    R: A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST
-    remain unset (0) when sending and MUST be ignored when receiving.
-
-    Stream Identifier: A 31-bit stream identifier (see ). The value 0 is reserved for frames
-    that are associated with the connection as a whole as opposed to an individual stream.
-
-    The structure and content of the frame payload is dependent entirely on the frame type.
-
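As a rough illustration of the fixed 9-octet header described above, the following Go sketch decodes the Length, Type, Flags and Stream Identifier fields, masking the reserved bit. The frameHeader type and parseFrameHeader function are hypothetical names for this example.

package main

import (
	"encoding/binary"
	"fmt"
)

// frameHeader mirrors the fixed 9-octet header that precedes every frame:
// 24-bit length, 8-bit type, 8-bit flags, 1 reserved bit, 31-bit stream id.
type frameHeader struct {
	Length   uint32 // 24 bits
	Type     uint8
	Flags    uint8
	StreamID uint32 // 31 bits, reserved bit masked off
}

func parseFrameHeader(b [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]),
		Type:     b[3],
		Flags:    b[4],
		StreamID: binary.BigEndian.Uint32(b[5:9]) & 0x7fffffff,
	}
}

func main() {
	// A HEADERS frame (type 0x1) of 13 octets with END_HEADERS (0x4) on stream 1.
	raw := [9]byte{0x00, 0x00, 0x0d, 0x01, 0x04, 0x00, 0x00, 0x00, 0x01}
	fmt.Printf("%+v\n", parseFrameHeader(raw))
}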
    - -
-    The size of a frame payload is limited by the maximum size that a receiver advertises in
-    the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value between 2^14
-    (16,384) and 2^24-1 (16,777,215) octets, inclusive.
-
-    All implementations MUST be capable of receiving and minimally processing frames up to
-    2^14 octets in length, plus the 9-octet frame header. The size of the frame header is
-    not included when describing frame sizes.
-
-    Certain frame types, such as PING, impose additional limits on the amount of payload
-    data allowed.
-
-    If a frame size exceeds any defined limit, or is too small to contain mandatory frame
-    data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error in a frame
-    that could alter the state of the entire connection MUST be treated as a connection
-    error; this includes any frame carrying a header block (that is, HEADERS, PUSH_PROMISE,
-    and CONTINUATION), SETTINGS, and any WINDOW_UPDATE frame with a stream identifier of 0.
-
-    Endpoints are not obligated to use all available space in a frame. Responsiveness can be
-    improved by using frames that are smaller than the permitted maximum size. Sending large
-    frames can result in delays in sending time-sensitive frames (such as RST_STREAM,
-    WINDOW_UPDATE, or PRIORITY), which, if blocked by the transmission of a large frame,
-    could affect performance.
-
    - -
    - - Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. - They are used within HTTP request and response messages as well as server push operations - (see ). - - - Header lists are collections of zero or more header fields. When transmitted over a - connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then - divided into one or more octet sequences, called header block fragments, and transmitted - within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. - - - The Cookie header field is treated specially by the HTTP - mapping (see ). - - - A receiving endpoint reassembles the header block by concatenating its fragments, then - decompresses the block to reconstruct the header list. - - - A complete header block consists of either: - - - a single HEADERS or PUSH_PROMISE frame, - with the END_HEADERS flag set, or - - - a HEADERS or PUSH_PROMISE frame with the END_HEADERS - flag cleared and one or more CONTINUATION frames, - where the last CONTINUATION frame has the END_HEADERS flag set. - - - - - Header compression is stateful. One compression context and one decompression context is - used for the entire connection. Each header block is processed as a discrete unit. - Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved - frames of any other type or from any other stream. The last frame in a sequence of - HEADERS or CONTINUATION frames MUST have the END_HEADERS - flag set. The last frame in a sequence of PUSH_PROMISE or - CONTINUATION frames MUST have the END_HEADERS flag set. This allows a - header block to be logically equivalent to a single frame. - - - Header block fragments can only be sent as the payload of HEADERS, - PUSH_PROMISE or CONTINUATION frames, because these frames - carry data that can modify the compression context maintained by a receiver. An endpoint - receiving HEADERS, PUSH_PROMISE or - CONTINUATION frames MUST reassemble header blocks and perform decompression - even if the frames are to be discarded. A receiver MUST terminate the connection with a - connection error of type - COMPRESSION_ERROR if it does not decompress a header block. - -
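A simplified Go sketch of header block reassembly: fragments from HEADERS and CONTINUATION frames are concatenated until a frame carries END_HEADERS, and any interleaved frame type is rejected. The frame type and flag constants reflect codes defined elsewhere in this specification, the headerBlock type is a hypothetical illustration, and HPACK decompression of the completed block is assumed to happen elsewhere.

package main

import (
	"errors"
	"fmt"
)

const (
	frameHeaders      = 0x1
	frameContinuation = 0x9
	flagEndHeaders    = 0x4
)

// headerBlock accumulates header block fragments for one stream until a
// frame with END_HEADERS arrives; the complete block would then be handed
// to an HPACK decoder (not shown here).
type headerBlock struct {
	fragments []byte
}

// add appends one fragment and reports whether the block is complete.
// Fragments must arrive as a contiguous sequence, so any interleaved
// frame type is a connection error.
func (h *headerBlock) add(frameType uint8, flags uint8, fragment []byte) (done bool, err error) {
	if frameType != frameHeaders && frameType != frameContinuation {
		return false, errors.New("PROTOCOL_ERROR: interleaved frame inside header block")
	}
	h.fragments = append(h.fragments, fragment...)
	return flags&flagEndHeaders != 0, nil
}

func main() {
	var hb headerBlock
	hb.add(frameHeaders, 0, []byte{0x82})                               // first fragment, END_HEADERS not set
	done, _ := hb.add(frameContinuation, flagEndHeaders, []byte{0x86}) // final fragment
	fmt.Println("complete:", done, "octets:", len(hb.fragments))
}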
    -
    - -
    - - A "stream" is an independent, bi-directional sequence of frames exchanged between the client - and server within an HTTP/2 connection. Streams have several important characteristics: - - - A single HTTP/2 connection can contain multiple concurrently open streams, with either - endpoint interleaving frames from multiple streams. - - - Streams can be established and used unilaterally or shared by either the client or - server. - - - Streams can be closed by either endpoint. - - - The order in which frames are sent on a stream is significant. Recipients process frames - in the order they are received. In particular, the order of HEADERS, - and DATA frames is semantically significant. - - - Streams are identified by an integer. Stream identifiers are assigned to streams by the - endpoint initiating the stream. - - - - -
    - - The lifecycle of a stream is shown in . - - -
-
-    [Figure: stream lifecycle state diagram showing transitions between the "idle",
-    "reserved (local)", "reserved (remote)", "open", "half closed (local)",
-    "half closed (remote)", and "closed" states]
-
-       H:  HEADERS frame (with implied CONTINUATIONs)
-       PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
-       ES: END_STREAM flag
-       R:  RST_STREAM frame
-
    - - - Note that this diagram shows stream state transitions and the frames and flags that affect - those transitions only. In this regard, CONTINUATION frames do not result - in state transitions; they are effectively part of the HEADERS or - PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is - processed as a separate event to the frame that bears it; a HEADERS frame - with the END_STREAM flag set can cause two state transitions. - - - Both endpoints have a subjective view of the state of a stream that could be different - when frames are in transit. Endpoints do not coordinate the creation of streams; they are - created unilaterally by either endpoint. The negative consequences of a mismatch in - states are limited to the "closed" state after sending RST_STREAM, where - frames might be received for some time after closing. - - - Streams have the following states: - - - - - - All streams start in the "idle" state. In this state, no frames have been - exchanged. - - - The following transitions are valid from this state: - - - Sending or receiving a HEADERS frame causes the stream to become - "open". The stream identifier is selected as described in . The same HEADERS frame can also - cause a stream to immediately become "half closed". - - - Sending a PUSH_PROMISE frame marks the associated stream for - later use. The stream state for the reserved stream transitions to "reserved - (local)". - - - Receiving a PUSH_PROMISE frame marks the associated stream as - reserved by the remote peer. The state of the stream becomes "reserved - (remote)". - - - - - Receiving any frames other than HEADERS or - PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (local)" state is one that has been promised by sending a - PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an - idle stream by associating the stream with an open stream that was initiated by the - remote peer (see ). - - - In this state, only the following transitions are possible: - - - The endpoint can send a HEADERS frame. This causes the stream to - open in a "half closed (remote)" state. - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MUST NOT send any type of frame other than HEADERS or - RST_STREAM in this state. - - - A PRIORITY frame MAY be received in this state. Receiving any type - of frame other than RST_STREAM or PRIORITY on a stream - in this state MUST be treated as a connection - error of type PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (remote)" state has been reserved by a remote peer. - - - In this state, only the following transitions are possible: - - - Receiving a HEADERS frame causes the stream to transition to - "half closed (local)". - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MAY send a PRIORITY frame in this state to reprioritize - the reserved stream. An endpoint MUST NOT send any type of frame other than - RST_STREAM, WINDOW_UPDATE, or PRIORITY - in this state. - - - Receiving any type of frame other than HEADERS or - RST_STREAM on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "open" state may be used by both peers to send frames of any type. 
- In this state, sending peers observe advertised stream - level flow control limits. - - - From this state either endpoint can send a frame with an END_STREAM flag set, which - causes the stream to transition into one of the "half closed" states: an endpoint - sending an END_STREAM flag causes the stream state to become "half closed (local)"; - an endpoint receiving an END_STREAM flag causes the stream state to become "half - closed (remote)". - - - Either endpoint can send a RST_STREAM frame from this state, causing - it to transition immediately to "closed". - - - - - - - A stream that is in the "half closed (local)" state cannot be used for sending - frames. Only WINDOW_UPDATE, PRIORITY and - RST_STREAM frames can be sent in this state. - - - A stream transitions from this state to "closed" when a frame that contains an - END_STREAM flag is received, or when either peer sends a RST_STREAM - frame. - - - A receiver can ignore WINDOW_UPDATE frames in this state, which might - arrive for a short period after a frame bearing the END_STREAM flag is sent. - - - PRIORITY frames received in this state are used to reprioritize - streams that depend on the current stream. - - - - - - - A stream that is "half closed (remote)" is no longer being used by the peer to send - frames. In this state, an endpoint is no longer obligated to maintain a receiver - flow control window if it performs flow control. - - - If an endpoint receives additional frames for a stream that is in this state, other - than WINDOW_UPDATE, PRIORITY or - RST_STREAM, it MUST respond with a stream error of type - STREAM_CLOSED. - - - A stream that is "half closed (remote)" can be used by the endpoint to send frames - of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. - - - A stream can transition from this state to "closed" by sending a frame that contains - an END_STREAM flag, or when either peer sends a RST_STREAM frame. - - - - - - - The "closed" state is the terminal state. - - - An endpoint MUST NOT send frames other than PRIORITY on a closed - stream. An endpoint that receives any frame other than PRIORITY - after receiving a RST_STREAM MUST treat that as a stream error of type - STREAM_CLOSED. Similarly, an endpoint that receives any frames after - receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type - STREAM_CLOSED, unless the frame is permitted as described below. - - - WINDOW_UPDATE or RST_STREAM frames can be received in - this state for a short period after a DATA or HEADERS - frame containing an END_STREAM flag is sent. Until the remote peer receives and - processes RST_STREAM or the frame bearing the END_STREAM flag, it - might send frames of these types. Endpoints MUST ignore - WINDOW_UPDATE or RST_STREAM frames received in this - state, though endpoints MAY choose to treat frames that arrive a significant time - after sending END_STREAM as a connection - error of type PROTOCOL_ERROR. - - - PRIORITY frames can be sent on closed streams to prioritize streams - that are dependent on the closed stream. Endpoints SHOULD process - PRIORITY frame, though they can be ignored if the stream has been - removed from the dependency tree (see ). - - - If this state is reached as a result of sending a RST_STREAM frame, - the peer that receives the RST_STREAM might have already sent - or - enqueued for sending - frames on the stream that cannot be withdrawn. 
An endpoint MUST ignore frames that it receives on closed streams after it has sent a
- RST_STREAM frame. An endpoint MAY choose to limit the period over which it ignores frames
- and treat frames that arrive after this time as being in error.
-
- Flow controlled frames (i.e., DATA) received after sending RST_STREAM are counted toward
- the connection flow control window. Even though these frames might be ignored, because
- they are sent before the sender receives the RST_STREAM, the sender will consider the
- frames to count against the flow control window.
-
- An endpoint might receive a PUSH_PROMISE frame after it sends RST_STREAM. PUSH_PROMISE
- causes a stream to become "reserved" even if the associated stream has been reset.
- Therefore, a RST_STREAM is needed to close an unwanted promised stream.
-
- In the absence of more specific guidance elsewhere in this document, implementations
- SHOULD treat the receipt of a frame that is not expressly permitted in the description of
- a state as a connection error of type PROTOCOL_ERROR. Frames of unknown types are ignored.
-
- An example of the state transitions for an HTTP request/response exchange can be found in
- . An example of the state transitions for server push can be found in and .
-
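A heavily simplified Go sketch of part of the stream state machine described above, covering only the transitions taken when the local endpoint sends a HEADERS frame. The streamState enumeration and onSendHeaders function are hypothetical names, and most transitions (RST_STREAM, PUSH_PROMISE, received frames) are deliberately omitted.

package main

import "fmt"

type streamState int

const (
	stateIdle streamState = iota
	stateReservedLocal
	stateReservedRemote
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateClosed
)

// onSendHeaders models the transition taken when the local endpoint sends
// a HEADERS frame, optionally carrying END_STREAM.
func onSendHeaders(s streamState, endStream bool) streamState {
	switch s {
	case stateIdle:
		if endStream {
			return stateHalfClosedLocal
		}
		return stateOpen
	case stateReservedLocal:
		return stateHalfClosedRemote
	default:
		return s // other transitions omitted in this sketch
	}
}

func main() {
	s := onSendHeaders(stateIdle, false)
	fmt.Println(s == stateOpen) // true: idle -> open
}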
    - - Streams are identified with an unsigned 31-bit integer. Streams initiated by a client - MUST use odd-numbered stream identifiers; those initiated by the server MUST use - even-numbered stream identifiers. A stream identifier of zero (0x0) is used for - connection control messages; the stream identifier zero cannot be used to establish a - new stream. - - - HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are - responded to with a stream identifier of one (0x1). After the upgrade - completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 - cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. - - - The identifier of a newly established stream MUST be numerically greater than all - streams that the initiating endpoint has opened or reserved. This governs streams that - are opened using a HEADERS frame and streams that are reserved using - PUSH_PROMISE. An endpoint that receives an unexpected stream identifier - MUST respond with a connection error of - type PROTOCOL_ERROR. - - - The first use of a new stream identifier implicitly closes all streams in the "idle" - state that might have been initiated by that peer with a lower-valued stream identifier. - For example, if a client sends a HEADERS frame on stream 7 without ever - sending a frame on stream 5, then stream 5 transitions to the "closed" state when the - first frame for stream 7 is sent or received. - - - Stream identifiers cannot be reused. Long-lived connections can result in an endpoint - exhausting the available range of stream identifiers. A client that is unable to - establish a new stream identifier can establish a new connection for new streams. A - server that is unable to establish a new stream identifier can send a - GOAWAY frame so that the client is forced to open a new connection for - new streams. - -
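The odd/even split and the prohibition on reuse suggest a simple allocator like the hypothetical Go sketch below, which hands out monotonically increasing 31-bit identifiers and reports exhaustion so that the caller can open a new connection (or, for servers, send GOAWAY).

package main

import (
	"errors"
	"fmt"
)

// idAllocator hands out monotonically increasing stream identifiers:
// odd for client-initiated streams, even for server-initiated streams.
// Identifier 0 is reserved for the connection itself.
type idAllocator struct {
	next uint32
}

func newIDAllocator(client bool) *idAllocator {
	if client {
		return &idAllocator{next: 1} // clients use odd identifiers
	}
	return &idAllocator{next: 2} // servers use even identifiers
}

func (a *idAllocator) allocate() (uint32, error) {
	if a.next > 0x7fffffff {
		// Identifiers are 31 bits and cannot be reused; a new connection
		// (or a GOAWAY from the server) is needed once they run out.
		return 0, errors.New("stream identifiers exhausted")
	}
	id := a.next
	a.next += 2
	return id, nil
}

func main() {
	a := newIDAllocator(true)
	for i := 0; i < 3; i++ {
		id, _ := a.allocate()
		fmt.Println(id) // 1, 3, 5
	}
}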
    - -
    - - A peer can limit the number of concurrently active streams using the - SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent - streams setting is specific to each endpoint and applies only to the peer that receives - the setting. That is, clients specify the maximum number of concurrent streams the - server can initiate, and servers specify the maximum number of concurrent streams the - client can initiate. - - - Streams that are in the "open" state, or either of the "half closed" states count toward - the maximum number of streams that an endpoint is permitted to open. Streams in any of - these three states count toward the limit advertised in the - SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the - "reserved" states do not count toward the stream limit. - - - Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a - HEADERS frame that causes their advertised concurrent stream limit to be - exceeded MUST treat this as a stream error. An - endpoint that wishes to reduce the value of - SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current - number of open streams can either close streams that exceed the new value or allow - streams to complete. - -
    -
    - -
    - - Using streams for multiplexing introduces contention over use of the TCP connection, - resulting in blocked streams. A flow control scheme ensures that streams on the same - connection do not destructively interfere with each other. Flow control is used for both - individual streams and for the connection as a whole. - - - HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. - - -
    - - HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be - used without requiring protocol changes. Flow control in HTTP/2 has the following - characteristics: - - - Flow control is specific to a connection; i.e., it is "hop-by-hop", not - "end-to-end". - - - Flow control is based on window update frames. Receivers advertise how many octets - they are prepared to receive on a stream and for the entire connection. This is a - credit-based scheme. - - - Flow control is directional with overall control provided by the receiver. A - receiver MAY choose to set any window size that it desires for each stream and for - the entire connection. A sender MUST respect flow control limits imposed by a - receiver. Clients, servers and intermediaries all independently advertise their - flow control window as a receiver and abide by the flow control limits set by - their peer when sending. - - - The initial value for the flow control window is 65,535 octets for both new streams - and the overall connection. - - - The frame type determines whether flow control applies to a frame. Of the frames - specified in this document, only DATA frames are subject to flow - control; all other frame types do not consume space in the advertised flow control - window. This ensures that important control frames are not blocked by flow control. - - - Flow control cannot be disabled. - - - HTTP/2 defines only the format and semantics of the WINDOW_UPDATE - frame (). This document does not stipulate how a - receiver decides when to send this frame or the value that it sends, nor does it - specify how a sender chooses to send packets. Implementations are able to select - any algorithm that suits their needs. - - - - - Implementations are also responsible for managing how requests and responses are sent - based on priority; choosing how to avoid head of line blocking for requests; and - managing the creation of new streams. Algorithm choices for these could interact with - any flow control algorithm. - -
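A minimal Go sketch of the credit-based scheme from the sender's point of view: a window that starts at 65,535 octets, is consumed by flow-controlled frames, and is replenished by WINDOW_UPDATE increments. The sendWindow type is a hypothetical illustration and ignores the split between stream-level and connection-level windows.

package main

import (
	"errors"
	"fmt"
)

// sendWindow tracks how many octets of flow-controlled frames (DATA)
// may still be sent on one stream or on the connection as a whole.
type sendWindow struct {
	available int64
}

// consume is called before sending a DATA payload of n octets.
func (w *sendWindow) consume(n int64) error {
	if n > w.available {
		return errors.New("would exceed peer's flow control window")
	}
	w.available -= n
	return nil
}

// windowUpdate credits the window when a WINDOW_UPDATE frame arrives.
func (w *sendWindow) windowUpdate(increment uint32) error {
	w.available += int64(increment)
	if w.available > 0x7fffffff {
		return errors.New("FLOW_CONTROL_ERROR: window exceeds 2^31-1")
	}
	return nil
}

func main() {
	w := &sendWindow{available: 65535} // initial window for streams and the connection
	if err := w.consume(16384); err != nil {
		panic(err)
	}
	fmt.Println("available after sending 16384 octets:", w.available)

	if err := w.windowUpdate(16384); err != nil {
		panic(err)
	}
	fmt.Println("available after WINDOW_UPDATE:", w.available)
}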
    - -
-    Flow control is defined to protect endpoints that are operating under resource
-    constraints. For example, a proxy needs to share memory between many connections, and
-    also might have a slow upstream connection and a fast downstream one. Flow control
-    addresses cases where the receiver is unable to process data on one stream, yet wants to
-    continue to process other streams in the same connection.
-
-    Deployments that do not require this capability can advertise a flow control window of
-    the maximum size, incrementing the available space when new data is received. This
-    effectively disables flow control for that receiver. Conversely, a sender is always
-    subject to the flow control window advertised by the receiver.
-
-    Deployments with constrained resources (for example, memory) can employ flow control to
-    limit the amount of memory a peer can consume. Note, however, that this can lead to
-    suboptimal use of available network resources if flow control is enabled without
-    knowledge of the bandwidth-delay product (see ).
-
-    Even with full awareness of the current bandwidth-delay product, implementation of flow
-    control can be difficult. When using flow control, the receiver MUST read from the TCP
-    receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
-    critical frames, such as WINDOW_UPDATE, are not read and acted upon.
-
    -
    - -
    - - A client can assign a priority for a new stream by including prioritization information in - the HEADERS frame that opens the stream. For an existing - stream, the PRIORITY frame can be used to change the - priority. - - - The purpose of prioritization is to allow an endpoint to express how it would prefer its - peer allocate resources when managing concurrent streams. Most importantly, priority can - be used to select streams for transmitting frames when there is limited capacity for - sending. - - - Streams can be prioritized by marking them as dependent on the completion of other streams - (). Each dependency is assigned a relative weight, a number - that is used to determine the relative proportion of available resources that are assigned - to streams dependent on the same stream. - - - - Explicitly setting the priority for a stream is input to a prioritization process. It - does not guarantee any particular processing or transmission order for the stream relative - to any other stream. An endpoint cannot force a peer to process concurrent streams in a - particular order using priority. Expressing priority is therefore only ever a suggestion. - - - Providing prioritization information is optional, so default values are used if no - explicit indicator is provided (). - - -
    - - Each stream can be given an explicit dependency on another stream. Including a - dependency expresses a preference to allocate resources to the identified stream rather - than to the dependent stream. - - - A stream that is not dependent on any other stream is given a stream dependency of 0x0. - In other words, the non-existent stream 0 forms the root of the tree. - - - A stream that depends on another stream is a dependent stream. The stream upon which a - stream is dependent is a parent stream. A dependency on a stream that is not currently - in the tree - such as a stream in the "idle" state - results in that stream being given - a default priority. - - - When assigning a dependency on another stream, the stream is added as a new dependency - of the parent stream. Dependent streams that share the same parent are not ordered with - respect to each other. For example, if streams B and C are dependent on stream A, and - if stream D is created with a dependency on stream A, this results in a dependency order - of A followed by B, C, and D in any order. - -
-    A                 A
-   / \      ==>      /|\
-  B   C             B D C
-
    - - An exclusive flag allows for the insertion of a new level of dependencies. The - exclusive flag causes the stream to become the sole dependency of its parent stream, - causing other dependencies to become dependent on the exclusive stream. In the - previous example, if stream D is created with an exclusive dependency on stream A, this - results in D becoming the dependency parent of B and C. - -
-                      A
-    A                 |
-   /|\      ==>       D
-  B D C              / \
-                    B   C
-
    - - Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all - of the streams that it depends on (the chain of parent streams up to 0x0) are either - closed, or it is not possible to make progress on them. - - - A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. - -
    - -
    - - All dependent streams are allocated an integer weight between 1 and 256 (inclusive). - - - Streams with the same parent SHOULD be allocated resources proportionally based on their - weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A - with weight 12, and if no progress can be made on A, stream B ideally receives one third - of the resources allocated to stream C. - -
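The proportional allocation can be illustrated with a small Go sketch; the shares helper is hypothetical and simply divides some notion of capacity among sibling streams according to their weights.

package main

import "fmt"

// shares divides capacity among sibling streams in proportion to their
// weights (1..256), as suggested for streams that depend on the same parent.
func shares(capacity float64, weights map[uint32]uint8) map[uint32]float64 {
	var total float64
	for _, w := range weights {
		total += float64(w)
	}
	out := make(map[uint32]float64, len(weights))
	for id, w := range weights {
		out[id] = capacity * float64(w) / total
	}
	return out
}

func main() {
	// Streams B (id 3, weight 4) and C (id 5, weight 12) depend on A:
	// B ideally receives one third of what C receives.
	fmt.Println(shares(16, map[uint32]uint8{3: 4, 5: 12})) // map[3:4 5:12]
}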
    - -
    - - Stream priorities are changed using the PRIORITY frame. Setting a - dependency causes a stream to become dependent on the identified parent stream. - - - Dependent streams move with their parent stream if the parent is reprioritized. Setting - a dependency with the exclusive flag for a reprioritized stream moves all the - dependencies of the new parent stream to become dependent on the reprioritized stream. - - - If a stream is made dependent on one of its own dependencies, the formerly dependent - stream is first moved to be dependent on the reprioritized stream's previous parent. - The moved dependency retains its weight. - -
-    For example, consider an original dependency tree where B and C depend on A, D and E
-    depend on C, and F depends on D. If A is made dependent on D, then D takes the place
-    of A. All other dependency relationships stay the same, except for F, which becomes
-    dependent on A if the reprioritization is exclusive.
-
-    [Figure: dependency tree reordering showing the intermediate, non-exclusive, and
-    exclusive outcomes of making A dependent on D]
-
    -
    - -
    - - When a stream is removed from the dependency tree, its dependencies can be moved to - become dependent on the parent of the closed stream. The weights of new dependencies - are recalculated by distributing the weight of the dependency of the closed stream - proportionally based on the weights of its dependencies. - - - Streams that are removed from the dependency tree cause some prioritization information - to be lost. Resources are shared between streams with the same parent stream, which - means that if a stream in that set closes or becomes blocked, any spare capacity - allocated to a stream is distributed to the immediate neighbors of the stream. However, - if the common dependency is removed from the tree, those streams share resources with - streams at the next highest level. - - - For example, assume streams A and B share a parent, and streams C and D both depend on - stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, - then stream C receives all the resources dedicated to stream A. If stream A is removed - from the tree, the weight of stream A is divided between streams C and D. If stream D - is still unable to proceed, this results in stream C receiving a reduced proportion of - resources. For equal starting weights, C receives one third, rather than one half, of - available resources. - - - It is possible for a stream to become closed while prioritization information that - creates a dependency on that stream is in transit. If a stream identified in a - dependency has no associated priority information, then the dependent stream is instead - assigned a default priority. This potentially creates - suboptimal prioritization, since the stream could be given a priority that is different - to what is intended. - - - To avoid these problems, an endpoint SHOULD retain stream prioritization state for a - period after streams become closed. The longer state is retained, the lower the chance - that streams are assigned incorrect or default priority values. - - - This could create a large state burden for an endpoint, so this state MAY be limited. - An endpoint MAY apply a fixed upper limit on the number of closed streams for which - prioritization state is tracked to limit state exposure. The amount of additional state - an endpoint maintains could be dependent on load; under high load, prioritization state - can be discarded to limit resource commitments. In extreme cases, an endpoint could - even discard prioritization state for active or reserved streams. If a fixed limit is - applied, endpoints SHOULD maintain state for at least as many streams as allowed by - their setting for SETTINGS_MAX_CONCURRENT_STREAMS. - - - An endpoint receiving a PRIORITY frame that changes the priority of a - closed stream SHOULD alter the dependencies of the streams that depend on it, if it has - retained enough state to do so. - -
    - -
    - - Providing priority information is optional. Streams are assigned a non-exclusive - dependency on stream 0x0 by default. Pushed streams - initially depend on their associated stream. In both cases, streams are assigned a - default weight of 16. - -
    -
    - -
    - - HTTP/2 framing permits two classes of error: - - - An error condition that renders the entire connection unusable is a connection error. - - - An error in an individual stream is a stream error. - - - - - A list of error codes is included in . - - -
    - - A connection error is any error which prevents further processing of the framing layer, - or which corrupts any connection state. - - - An endpoint that encounters a connection error SHOULD first send a GOAWAY - frame () with the stream identifier of the last stream that it - successfully received from its peer. The GOAWAY frame includes an error - code that indicates why the connection is terminating. After sending the - GOAWAY frame, the endpoint MUST close the TCP connection. - - - It is possible that the GOAWAY will not be reliably received by the - receiving endpoint (see ). In the event of a connection error, - GOAWAY only provides a best effort attempt to communicate with the peer - about why the connection is being terminated. - - - An endpoint can end a connection at any time. In particular, an endpoint MAY choose to - treat a stream error as a connection error. Endpoints SHOULD send a - GOAWAY frame when ending a connection, providing that circumstances - permit it. - -
    - -
    - - A stream error is an error related to a specific stream that does not affect processing - of other streams. - - - An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error - occurred. The RST_STREAM frame includes an error code that indicates the - type of error. - - - A RST_STREAM is the last frame that an endpoint can send on a stream. - The peer that sends the RST_STREAM frame MUST be prepared to receive any - frames that were sent or enqueued for sending by the remote peer. These frames can be - ignored, except where they modify connection state (such as the state maintained for - header compression, or flow control). - - - Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for - any stream. However, an endpoint MAY send additional RST_STREAM frames if - it receives frames on a closed stream after more than a round-trip time. This behavior - is permitted to deal with misbehaving implementations. - - - An endpoint MUST NOT send a RST_STREAM in response to an - RST_STREAM frame, to avoid looping. - -
    - -
    - - If the TCP connection is closed or reset while streams remain in open or half closed - states, then the endpoint MUST assume that those streams were abnormally interrupted and - could be incomplete. - -
    -
    - -
    - - HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide - additional services or alter any aspect of the protocol, within the limitations described - in this section. Extensions are effective only within the scope of a single HTTP/2 - connection. - - - Extensions are permitted to use new frame types, new - settings, or new error - codes. Registries are established for managing these extension points: frame types, settings and - error codes. - - - Implementations MUST ignore unknown or unsupported values in all extensible protocol - elements. Implementations MUST discard frames that have unknown or unsupported types. - This means that any of these extension points can be safely used by extensions without - prior arrangement or negotiation. However, extension frames that appear in the middle of - a header block are not permitted; these MUST be treated - as a connection error of type - PROTOCOL_ERROR. - - - However, extensions that could change the semantics of existing protocol components MUST - be negotiated before being used. For example, an extension that changes the layout of the - HEADERS frame cannot be used until the peer has given a positive signal - that this is acceptable. In this case, it could also be necessary to coordinate when the - revised layout comes into effect. Note that treating any frame other than - DATA frames as flow controlled is such a change in semantics, and can only - be done through negotiation. - - - This document doesn't mandate a specific method for negotiating the use of an extension, - but notes that a setting could be used for that - purpose. If both peers set a value that indicates willingness to use the extension, then - the extension can be used. If a setting is used for extension negotiation, the initial - value MUST be defined so that the extension is initially disabled. - -
    -
    - -
-    This specification defines a number of frame types, each identified by a unique 8-bit type
-    code. Each frame type serves a distinct purpose either in the establishment and management
-    of the connection as a whole, or of individual streams.
-
-    The transmission of specific frame types can alter the state of a connection. If endpoints
-    fail to maintain a synchronized view of the connection state, successful communication
-    within the connection will no longer be possible. Therefore, it is important that endpoints
-    have a shared comprehension of how the state is affected by the use of any given frame.
-
    - - DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated - with a stream. One or more DATA frames are used, for instance, to carry HTTP request or - response payloads. - - - DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to - obscure the size of messages. - -
    - -
    - - The DATA frame contains the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is optional and is only present if the PADDED flag is set. - - - Application data. The amount of data is the remainder of the frame payload after - subtracting the length of the other fields that are present. - - - Padding octets that contain no application semantic value. Padding octets MUST be set - to zero when sending and ignored when receiving. - - - - - - The DATA frame defines the following flags: - - - Bit 1 being set indicates that this frame is the last that the endpoint will send for - the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. - - - Bit 4 being set indicates that the Pad Length field and any padding that it describes - is present. - - - - - DATA frames MUST be associated with a stream. If a DATA frame is received whose stream - identifier field is 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - DATA frames are subject to flow control and can only be sent when a stream is in the - "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow - control, including Pad Length and Padding fields if present. If a DATA frame is received - whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond - with a stream error of type - STREAM_CLOSED. - - - The total number of padding octets is determined by the value of the Pad Length field. If - the length of the padding is greater than the length of the frame payload, the recipient - MUST treat this as a connection error of - type PROTOCOL_ERROR. - - - A frame can be increased in size by one octet by including a Pad Length field with a - value of zero. - - - - - Padding is a security feature; see . - -
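A hedged Go sketch of how a receiver might strip padding from a DATA frame payload, assuming PADDED corresponds to flag value 0x8 as described for this frame type; the dataPayload helper is a hypothetical illustration.

package main

import (
	"errors"
	"fmt"
)

// flagPadded indicates that the Pad Length field is present.
const flagPadded = 0x8

// dataPayload extracts the application data from a DATA frame payload,
// honouring the optional Pad Length field when the PADDED flag is set.
func dataPayload(flags uint8, payload []byte) ([]byte, error) {
	if flags&flagPadded == 0 {
		return payload, nil
	}
	if len(payload) < 1 {
		return nil, errors.New("FRAME_SIZE_ERROR: missing Pad Length")
	}
	padLen := int(payload[0])
	body := payload[1:]
	if padLen > len(body) {
		// Padding longer than the rest of the payload is a connection error.
		return nil, errors.New("PROTOCOL_ERROR: padding exceeds payload")
	}
	return body[:len(body)-padLen], nil
}

func main() {
	// Pad Length = 2, data = "hi", followed by two zero padding octets.
	data, err := dataPayload(flagPadded, []byte{0x02, 'h', 'i', 0x00, 0x00})
	fmt.Printf("%q %v\n", data, err)
}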
    - -
    - - The HEADERS frame (type=0x1) is used to open a stream, - and additionally carries a header block fragment. HEADERS frames can be sent on a stream - in the "open" or "half closed (remote)" states. - -
    - -
    - - The HEADERS frame payload has the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is only present if the PADDED flag is set. - - - A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set. - - - A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set. - - - An 8-bit weight for the stream, see . Add one to the - value to obtain a weight between 1 and 256. This field is only present if the - PRIORITY flag is set. - - - A header block fragment. - - - Padding octets that contain no application semantic value. Padding octets MUST be set - to zero when sending and ignored when receiving. - - - - - - The HEADERS frame defines the following flags: - - - - Bit 1 being set indicates that the header block is - the last that the endpoint will send for the identified stream. Setting this flag - causes the stream to enter one of "half closed" - states. - - - A HEADERS frame carries the END_STREAM flag that signals the end of a stream. - However, a HEADERS frame with the END_STREAM flag set can be followed by - CONTINUATION frames on the same stream. Logically, the - CONTINUATION frames are part of the HEADERS frame. - - - - - Bit 3 being set indicates that this frame contains an entire header block and is not followed by any - CONTINUATION frames. - - - A HEADERS frame without the END_HEADERS flag set MUST be followed by a - CONTINUATION frame for the same stream. A receiver MUST treat the - receipt of any other type of frame or a frame on a different stream as a connection error of type - PROTOCOL_ERROR. - - - - - Bit 4 being set indicates that the Pad Length field and any padding that it - describes is present. - - - - - Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight - fields are present; see . - - - - - - - The payload of a HEADERS frame contains a header block - fragment. A header block that does not fit within a HEADERS frame is continued in - a CONTINUATION frame. - - - - HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose - stream identifier field is 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - - The HEADERS frame changes the connection state as described in . - - - - The HEADERS frame includes optional padding. Padding fields and flags are identical to - those defined for DATA frames. - - - Prioritization information in a HEADERS frame is logically equivalent to a separate - PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in - stream prioritization when new streams are created. Priorization fields in HEADERS frames - subsequent to the first on a stream reprioritize the - stream. - -
    - -
    - - The PRIORITY frame (type=0x2) specifies the sender-advised - priority of a stream. It can be sent at any time for an existing stream, including - closed streams. This enables reprioritization of existing streams. - -
    - -
    - - The payload of a PRIORITY frame contains the following fields: - - - A single bit flag indicates that the stream dependency is exclusive, see . - - - A 31-bit stream identifier for the stream that this stream depends on, see . - - - An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256. - - - - - - The PRIORITY frame does not define any flags. - - - - The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received - with a stream identifier of 0x0, the recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open", - "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be - sent between consecutive frames that comprise a single header - block. Note that this frame could arrive after processing or frame sending has - completed, which would cause it to have no effect on the current stream. For a stream - that is in the "half closed (remote)" or "closed" - state, this frame can only affect - processing of the current stream and not frame transmission. - - - The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state. - This allows for the reprioritization of a group of dependent streams by altering the - priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a - closed stream risks being ignored due to the peer having discarded priority state - information for that stream. - -
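The 5-octet payload described above can be decoded as in the following hypothetical Go sketch (parsePriority); note that the exclusive bit shares the first octet with the high bit of the stream dependency, and one is added to the wire weight to obtain the effective weight of 1 to 256.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

type priority struct {
	Exclusive bool
	DependsOn uint32 // 31-bit identifier of the stream depended on
	Weight    uint16 // effective weight, 1..256 (wire value + 1)
}

// parsePriority decodes the 5-octet PRIORITY frame payload.
func parsePriority(payload []byte) (priority, error) {
	if len(payload) != 5 {
		return priority{}, errors.New("FRAME_SIZE_ERROR: PRIORITY payload must be 5 octets")
	}
	dep := binary.BigEndian.Uint32(payload[0:4])
	return priority{
		Exclusive: dep&0x80000000 != 0,
		DependsOn: dep & 0x7fffffff,
		Weight:    uint16(payload[4]) + 1,
	}, nil
}

func main() {
	// Exclusive dependency on stream 3 with wire weight 15 (effective 16).
	p, err := parsePriority([]byte{0x80, 0x00, 0x00, 0x03, 0x0f})
	fmt.Printf("%+v %v\n", p, err)
}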
    - -
    - - The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by - the initiator of a stream, it indicates that they wish to cancel the stream or that an - error condition has occurred. When sent by the receiver of a stream, it indicates that - either the receiver is rejecting the stream, requesting that the stream be cancelled, or - that an error condition has occurred. - -
    - -
    - - - The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being - terminated. - - - - The RST_STREAM frame does not define any flags. - - - - The RST_STREAM frame fully terminates the referenced stream and causes it to enter the - closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send - additional frames for that stream, with the exception of PRIORITY. However, - after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process - additional frames sent on the stream that might have been sent by the peer prior to the - arrival of the RST_STREAM. - - - - RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received - with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - - - RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM - frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - -
    - -
    - - The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints - communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is - also used to acknowledge the receipt of those parameters. Individually, a SETTINGS - parameter can also be referred to as a "setting". - - - SETTINGS parameters are not negotiated; they describe characteristics of the sending peer, - which are used by the receiving peer. Different values for the same parameter can be - advertised by each peer. For example, a client might set a high initial flow control - window, whereas a server might set a lower value to conserve resources. - - - - A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be - sent at any other time by either endpoint over the lifetime of the connection. - Implementations MUST support all of the parameters defined by this specification. - - - - Each parameter in a SETTINGS frame replaces any existing value for that parameter. - Parameters are processed in the order in which they appear, and a receiver of a SETTINGS - frame does not need to maintain any state other than the current value of its - parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by - a receiver. - - - SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS - frame defines the following flag: - - - Bit 1 being set indicates that this frame acknowledges receipt and application of the - peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST - be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value - other than 0 MUST be treated as a connection - error of type FRAME_SIZE_ERROR. For more info, see Settings Synchronization. - - - - - SETTINGS frames always apply to a connection, never a single stream. The stream - identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS - frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond - with a connection error of type - PROTOCOL_ERROR. - - - The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame - MUST be treated as a connection error of type - PROTOCOL_ERROR. - - -
    - - The payload of a SETTINGS frame consists of zero or more parameters, each consisting of - an unsigned 16-bit setting identifier and an unsigned 32-bit value. - - -
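A minimal sketch of that payload layout in Go, assuming the 9-octet frame header (length, type 0x4, flags, stream 0x0) is written separately; the type and helper names are invented for illustration:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // setting is one SETTINGS parameter: a 16-bit identifier and a 32-bit value.
    type setting struct {
        ID  uint16
        Val uint32
    }

    // encodeSettingsPayload serializes the parameters in the order given.
    func encodeSettingsPayload(params []setting) []byte {
        buf := make([]byte, 0, 6*len(params))
        for _, p := range params {
            buf = binary.BigEndian.AppendUint16(buf, p.ID)
            buf = binary.BigEndian.AppendUint32(buf, p.Val)
        }
        return buf
    }

    func main() {
        fmt.Printf("%% x\n", encodeSettingsPayload([]setting{
            {ID: 0x3, Val: 100},   // e.g. SETTINGS_MAX_CONCURRENT_STREAMS
            {ID: 0x4, Val: 65535}, // e.g. SETTINGS_INITIAL_WINDOW_SIZE
        }))
    }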
    - -
    -
    - -
    - - The following parameters are defined: - - - - Allows the sender to inform the remote endpoint of the maximum size of the header - compression table used to decode header blocks, in octets. The encoder can select - any size equal to or less than this value by using signaling specific to the - header compression format inside a header block. The initial value is 4,096 - octets. - - - - - This setting can be used to disable server - push. An endpoint MUST NOT send a PUSH_PROMISE frame if it - receives this parameter set to a value of 0. An endpoint that has both set this - parameter to 0 and had it acknowledged MUST treat the receipt of a - PUSH_PROMISE frame as a connection error of type - PROTOCOL_ERROR. - - - The initial value is 1, which indicates that server push is permitted. Any value - other than 0 or 1 MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - Indicates the maximum number of concurrent streams that the sender will allow. - This limit is directional: it applies to the number of streams that the sender - permits the receiver to create. Initially there is no limit to this value. It is - recommended that this value be no smaller than 100, so as to not unnecessarily - limit parallelism. - - - A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special - by endpoints. A zero value does prevent the creation of new streams; however, this - can also happen for any limit that is exhausted with active streams. Servers - SHOULD only set a zero value for short durations; if a server does not wish to - accept requests, closing the connection could be preferable. - - - - - Indicates the sender's initial window size (in octets) for stream level flow - control. The initial value is 2^16-1 (65,535) octets. - - - This setting affects the window size of all streams, including existing streams, - see . - - - Values above the maximum flow control window size of 2^31-1 MUST - be treated as a connection error of - type FLOW_CONTROL_ERROR. - - - - - Indicates the size of the largest frame payload that the sender is willing to - receive, in octets. - - - The initial value is 2^14 (16,384) octets. The value advertised by - an endpoint MUST be between this initial value and the maximum allowed frame size - (2^24-1 or 16,777,215 octets), inclusive. Values outside this range - MUST be treated as a connection error - of type PROTOCOL_ERROR. - - - - - This advisory setting informs a peer of the maximum size of header list that the - sender is prepared to accept, in octets. The value is based on the uncompressed - size of header fields, including the length of the name and value in octets plus - an overhead of 32 octets for each header field. - - - For any given request, a lower limit than what is advertised MAY be enforced. The - initial value of this setting is unlimited. - - - - - - An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier - MUST ignore that setting. -
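The range rules above lend themselves to a small validation routine. The Go sketch below uses invented constant and function names; the numeric setting identifiers are the registered HTTP/2 values, which the prose above does not repeat. It returns the connection error a receiver would raise for an out-of-range value and ignores unknown identifiers, as required:

    package main

    import "fmt"

    const (
        settingEnablePush        = 0x2
        settingInitialWindowSize = 0x4
        settingMaxFrameSize      = 0x5
    )

    // validateSetting applies the range rules above. It returns the name of the
    // connection error a receiver would raise, or "" if the value is acceptable.
    func validateSetting(id uint16, val uint32) string {
        switch id {
        case settingEnablePush:
            if val > 1 {
                return "PROTOCOL_ERROR" // only 0 or 1 is permitted
            }
        case settingInitialWindowSize:
            if val > 1<<31-1 {
                return "FLOW_CONTROL_ERROR" // exceeds the maximum window size
            }
        case settingMaxFrameSize:
            if val < 1<<14 || val > 1<<24-1 {
                return "PROTOCOL_ERROR" // outside 16,384 .. 16,777,215
            }
        }
        return "" // acceptable, or an unknown setting that MUST be ignored
    }

    func main() {
        fmt.Println(validateSetting(settingMaxFrameSize, 1<<13)) // PROTOCOL_ERROR
        fmt.Println(validateSetting(0x99, 7))                    // "" (unknown, ignored)
    }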
    - -
    - - Most values in SETTINGS benefit from or require an understanding of when the peer has - received and applied the changed parameter values. In order to provide - such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag - is not set MUST apply the updated parameters as soon as possible upon receipt. - - - The values in the SETTINGS frame MUST be processed in the order they appear, with no - other frame processing between values. Unsupported parameters MUST be ignored. Once - all values have been processed, the recipient MUST immediately emit a SETTINGS frame - with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender - of the altered parameters can rely on the setting having been applied. - - - If the sender of a SETTINGS frame does not receive an acknowledgement within a - reasonable amount of time, it MAY issue a connection error of type - SETTINGS_TIMEOUT. - -
    -
    - -
    - - The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of - streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned - 31-bit identifier of the stream the endpoint plans to create along with a set of headers - that provide additional context for the stream. contains a - thorough description of the use of PUSH_PROMISE frames. - - -
    - -
    - - The PUSH_PROMISE frame payload has the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is only present if the PADDED flag is set. - - - A single reserved bit. - - - An unsigned 31-bit integer that identifies the stream that is reserved by the - PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next - stream sent by the sender (see new stream - identifier). - - - A header block fragment containing request header - fields. - - - Padding octets. - - - - - - The PUSH_PROMISE frame defines the following flags: - - - - Bit 3 being set indicates that this frame contains an entire header block and is not followed by any - CONTINUATION frames. - - - A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a - CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any - other type of frame or a frame on a different stream as a connection error of type - PROTOCOL_ERROR. - - - - - Bit 4 being set indicates that the Pad Length field and any padding that it - describes is present. - - - - - - - PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream - identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the - stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - - Promised streams are not required to be used in the order they are promised. The - PUSH_PROMISE only reserves stream identifiers for later use. - - - - PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the - peer endpoint is set to 0. An endpoint that has set this setting and has received - acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type - PROTOCOL_ERROR. - - - Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a - RST_STREAM referencing the promised stream identifier back to the sender of - the PUSH_PROMISE. - - - - A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for - header compression. PUSH_PROMISE also reserves a stream for later use, causing the - promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a - stream unless that stream is either "open" or "half closed (remote)"; the sender MUST - ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST - be in the "idle" state). - - - Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream - state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a - stream that is neither "open" nor "half closed (local)" as a connection error of type - PROTOCOL_ERROR. However, an endpoint that has sent - RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that - might have been created before the RST_STREAM frame is received and - processed. - - - A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a - stream that is not currently in the "idle" state) as a connection error of type - PROTOCOL_ERROR. - - - - The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical - to those defined for DATA frames. - -
    - -
    - - The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the - sender, as well as determining whether an idle connection is still functional. PING - frames can be sent from any endpoint. - -
    - -
    - - - In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. - A sender can include any value it chooses and use those bytes in any fashion. - - - Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with - the ACK flag set in response, with an identical payload. PING responses SHOULD be given - higher priority than any other frame. - - - - The PING frame defines the following flags: - - - Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST - set this flag in PING responses. An endpoint MUST NOT respond to PING frames - containing this flag. - - - - - PING frames are not associated with any individual stream. If a PING frame is received - with a stream identifier field value other than 0x0, the recipient MUST respond with a - connection error of type - PROTOCOL_ERROR. - - - Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type - FRAME_SIZE_ERROR. - - -
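A sketch of the required PING behavior in Go; the function name and return convention are invented for illustration, but the rule it encodes is the one above (answer a non-ACK PING with an ACK PING carrying an identical 8-octet payload, never answer an ACK):

    package main

    import "fmt"

    const flagPingAck = 0x1

    // handlePing decides how to react to a received PING frame.
    func handlePing(flags byte, payload [8]byte) (reply bool, replyFlags byte, replyPayload [8]byte) {
        if flags&flagPingAck != 0 {
            return false, 0, [8]byte{} // already a PING response; do not answer
        }
        return true, flagPingAck, payload // echo the payload with the ACK flag set
    }

    func main() {
        ok, f, p := handlePing(0, [8]byte{1, 2, 3, 4, 5, 6, 7, 8})
        fmt.Println(ok, f, p)
    }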
    - -
    - - The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this - connection. GOAWAY can be sent by either the client or the server. Once sent, the sender - will ignore frames sent on any new streams with identifiers higher than the included last - stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the - connection, although a new connection can be established for new streams. - - - The purpose of this frame is to allow an endpoint to gracefully stop accepting new - streams, while still finishing processing of previously established streams. This enables - administrative actions, like server maintenance. - - - There is an inherent race condition between an endpoint starting new streams and the - remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream - identifier of the last peer-initiated stream which was or might be processed on the - sending endpoint in this connection. For instance, if the server sends a GOAWAY frame, - the identified stream is the highest numbered stream initiated by the client. - - - If the receiver of the GOAWAY has sent data on streams with a higher stream identifier - than what is indicated in the GOAWAY frame, those streams are not or will not be - processed. The receiver of the GOAWAY frame can treat the streams as though they had - never been created at all, thereby allowing those streams to be retried later on a new - connection. - - - Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote - can know whether a stream has been partially processed or not. For example, if an HTTP - client sends a POST at the same time that a server closes a connection, the client cannot - know if the server started to process that POST request if the server does not send a - GOAWAY frame to indicate what streams it might have acted on. - - - An endpoint might choose to close a connection without sending GOAWAY for misbehaving - peers. - -
    - -
    - - The GOAWAY frame does not define any flags. - - - The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat - a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type - PROTOCOL_ERROR. - - - The last stream identifier in the GOAWAY frame contains the highest numbered stream - identifier on which the sender of the GOAWAY frame might have taken some action, or - might yet take action. All streams up to and including the identified stream might - have been processed in some way. The last stream identifier can be set to 0 if no streams - were processed. - - - In this context, "processed" means that some data from the stream was passed to some - higher layer of software that might have taken some action as a result. - - - If a connection terminates without a GOAWAY frame, the last stream identifier is - effectively the highest possible stream identifier. - - - On streams with lower or equal numbered identifiers that were not closed completely prior - to the connection being closed, re-attempting requests, transactions, or any protocol - activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or - DELETE. Any protocol activity that uses higher numbered streams can be safely retried - using a new connection. - - - Activity on streams numbered lower or equal to the last stream identifier might still - complete successfully. The sender of a GOAWAY frame might gracefully shut down a - connection by sending a GOAWAY frame, maintaining the connection in an open state until - all in-progress streams complete. - - - An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an - endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could - subsequently encounter a condition that requires immediate termination of the connection. - The last stream identifier from the last GOAWAY frame received indicates which streams - could have been acted upon. Endpoints MUST NOT increase the value they send in the last - stream identifier, since the peers might already have retried unprocessed requests on - another connection. - - - A client that is unable to retry requests loses all requests that are in flight when the - server closes the connection. This is especially true for intermediaries that might - not be serving clients using HTTP/2. A server that is attempting to gracefully shut down - a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to - 2^31-1 and a NO_ERROR code. This signals to the client that - a shutdown is imminent and that no further requests can be initiated. After waiting at - least one round trip time, the server can send another GOAWAY frame with an updated last - stream identifier. This ensures that a connection can be cleanly shut down without losing - requests. - - - - After sending a GOAWAY frame, the sender can discard frames for streams with identifiers - higher than the identified last stream. However, any frames that alter connection state - cannot be completely ignored. For instance, HEADERS, - PUSH_PROMISE and CONTINUATION frames MUST be minimally - processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow - control window. Failure to process these frames can cause flow control or header - compression state to become unsynchronized. 
- - - - The GOAWAY frame also contains a 32-bit error code that - contains the reason for closing the connection. - - - Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug - data is intended for diagnostic purposes only and carries no semantic value. Debug - information could contain security- or privacy-sensitive data. Logged or otherwise - persistently stored debug data MUST have adequate safeguards to prevent unauthorized - access. - -
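As a rough illustration, the Go sketch below assembles the GOAWAY payload described above (reserved bit plus 31-bit last stream identifier, 32-bit error code, optional opaque debug data); the helper name is invented and the 9-octet frame header is omitted:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // appendGoAwayPayload builds a GOAWAY payload. Debug data is optional and
    // carries no semantic value.
    func appendGoAwayPayload(buf []byte, lastStreamID, errorCode uint32, debug []byte) []byte {
        buf = binary.BigEndian.AppendUint32(buf, lastStreamID&0x7fffffff) // reserved bit cleared
        buf = binary.BigEndian.AppendUint32(buf, errorCode)
        return append(buf, debug...)
    }

    func main() {
        const noError = 0x0
        fmt.Printf("%% x\n", appendGoAwayPayload(nil, 7, noError, []byte("shutting down")))
    }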
    - -
    - - The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. - - - Flow control operates at two levels: on each individual stream and on the entire - connection. - - - Both types of flow control are hop-by-hop; that is, only between the two endpoints. - Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. - However, throttling of data transfer by any receiver can indirectly cause the propagation - of flow control information toward the original sender. - - - Flow control only applies to frames that are identified as being subject to flow control. - Of the frame types defined in this document, this includes only DATA frames. - Frames that are exempt from flow control MUST be accepted and processed, unless the - receiver is unable to assign resources to handling the frame. A receiver MAY respond with - a stream error or connection error of type - FLOW_CONTROL_ERROR if it is unable to accept a frame. - -
    - -
    - - The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer - indicating the number of octets that the sender can transmit in addition to the existing - flow control window. The legal range for the increment to the flow control window is 1 to - 2^31-1 (0x7fffffff) octets. - - - The WINDOW_UPDATE frame does not define any flags. - - - The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the - former case, the frame's stream identifier indicates the affected stream; in the latter, - the value "0" indicates that the entire connection is the subject of the frame. - - - A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window - increment of 0 as a stream error of type - PROTOCOL_ERROR; errors on the connection flow control window MUST be - treated as a connection error. - - - WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag. - This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)" - or "closed" stream. A receiver MUST NOT treat this as an error, see . - - - A receiver that receives a flow controlled frame MUST always account for its contribution - against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the - frame is in error. Since the sender counts the frame toward the flow control window, if - the receiver does not, the flow control window at sender and receiver can become - different. - -
    - - Flow control in HTTP/2 is implemented using a window kept by each sender on every - stream. The flow control window is a simple integer value that indicates how many octets - of data the sender is permitted to transmit; as such, its size is a measure of the - buffering capacity of the receiver. - - - Two flow control windows are applicable: the stream flow control window and the - connection flow control window. The sender MUST NOT send a flow controlled frame with a - length that exceeds the space available in either of the flow control windows advertised - by the receiver. Frames with zero length with the END_STREAM flag set (that is, an - empty DATA frame) MAY be sent if there is no available space in either - flow control window. - - - For flow control calculations, the 9 octet frame header is not counted. - - - After sending a flow controlled frame, the sender reduces the space available in both - windows by the length of the transmitted frame. - - - The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up - space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream - and connection level flow control windows. - - - A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the - amount specified in the frame. - - - A sender MUST NOT allow a flow control window to exceed 2^31-1 octets. - If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this - maximum, it MUST terminate either the stream or the connection, as appropriate. For - streams, the sender sends a RST_STREAM with an error code of - FLOW_CONTROL_ERROR; for the connection, a GOAWAY - frame with a FLOW_CONTROL_ERROR code. - - - Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are - completely asynchronous with respect to each other. This property allows a receiver to - aggressively update the window size kept by the sender to prevent streams from stalling. -
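A minimal Go sketch of the sender-side bookkeeping described above, covering both the window reduction on send and the WINDOW_UPDATE rules (zero increment, 2^31-1 overflow); the type and method names are invented for illustration:

    package main

    import (
        "errors"
        "fmt"
    )

    const maxWindow = 1<<31 - 1

    // window tracks one flow control window (stream or connection level) as seen
    // by the sender.
    type window struct{ avail int64 }

    // consume reduces the window after sending a flow controlled frame of n octets.
    func (w *window) consume(n int64) error {
        if n > w.avail {
            return errors.New("frame exceeds available flow control window")
        }
        w.avail -= n
        return nil
    }

    // update applies a WINDOW_UPDATE increment; overflow must be answered with
    // RST_STREAM or GOAWAY carrying FLOW_CONTROL_ERROR, as appropriate.
    func (w *window) update(increment uint32) error {
        if increment == 0 {
            return errors.New("PROTOCOL_ERROR: zero window increment")
        }
        if w.avail+int64(increment) > maxWindow {
            return errors.New("FLOW_CONTROL_ERROR: window overflow")
        }
        w.avail += int64(increment)
        return nil
    }

    func main() {
        w := window{avail: 65535} // default initial window
        fmt.Println(w.consume(16384), w.avail)
        fmt.Println(w.update(16384), w.avail)
    }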
    - -
    - - When an HTTP/2 connection is first established, new streams are created with an initial - flow control window size of 65,535 octets. The connection flow control window is 65,535 - octets. Both endpoints can adjust the initial window size for new streams by including - a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS - frame that forms part of the connection preface. The connection flow control window can - only be changed using WINDOW_UPDATE frames. - - - Prior to receiving a SETTINGS frame that sets a value for - SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default - initial window size when sending flow controlled frames. Similarly, the connection flow - control window is set to the default initial window size until a WINDOW_UPDATE frame is - received. - - - A SETTINGS frame can alter the initial flow control window size for all - current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, - a receiver MUST adjust the size of all stream flow control windows that it maintains by - the difference between the new value and the old value. - - - A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in - a flow control window to become negative. A sender MUST track the negative flow control - window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE - frames that cause the flow control window to become positive. - - - For example, if the client sends 60KB immediately on connection establishment, and the - server sets the initial window size to be 16KB, the client will recalculate the - available flow control window to be -44KB on receipt of the SETTINGS - frame. The client retains a negative flow control window until WINDOW_UPDATE frames - restore the window to being positive, after which the client can resume sending. - - - A SETTINGS frame cannot alter the connection flow control window. - - - An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that - causes any flow control window to exceed the maximum size as a connection error of type - FLOW_CONTROL_ERROR. - -
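The arithmetic in the example above can be sketched directly. The helper below (Go, with invented names) applies a SETTINGS_INITIAL_WINDOW_SIZE change to every existing stream window and shows the window going negative until WINDOW_UPDATE frames restore it:

    package main

    import "fmt"

    // adjustStreamWindows applies the difference between the new and old initial
    // window size to every existing stream window. The result can be negative;
    // the sender simply stops sending until the window becomes positive again.
    func adjustStreamWindows(windows map[uint32]int64, oldInitial, newInitial int64) {
        delta := newInitial - oldInitial
        for id := range windows {
            windows[id] += delta
        }
    }

    func main() {
        // 65,535-octet default window, 60 KB already sent on stream 1, then the
        // server advertises a 16 KB initial window.
        streams := map[uint32]int64{1: 65535 - 60*1024}
        adjustStreamWindows(streams, 65535, 16*1024)
        fmt.Println(streams[1]) // about -44 KB: negative until WINDOW_UPDATE arrives
    }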
    - -
    - - A receiver that wishes to use a smaller flow control window than the current size can - send a new SETTINGS frame. However, the receiver MUST be prepared to - receive data that exceeds this window size, since the sender might send data that - exceeds the lower limit prior to processing the SETTINGS frame. - - - After sending a SETTINGS frame that reduces the initial flow control window size, a - receiver has two options for handling streams that exceed flow control limits: - - - The receiver can immediately send RST_STREAM with - FLOW_CONTROL_ERROR error code for the affected streams. - - - The receiver can accept the streams and tolerate the resulting head of line - blocking, sending WINDOW_UPDATE frames as it consumes data. - - - -
    -
    - -
    - - The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can - be sent on an existing stream, as long as the preceding frame is on the same stream and is - a HEADERS, PUSH_PROMISE or CONTINUATION frame without the - END_HEADERS flag set. - - -
    - -
    - - The CONTINUATION frame payload contains a header block - fragment. - - - - The CONTINUATION frame defines the following flag: - - - - Bit 3 being set indicates that this frame ends a header - block. - - - If the END_HEADERS bit is not set, this frame MUST be followed by another - CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or - a frame on a different stream as a connection - error of type PROTOCOL_ERROR. - - - - - - - The CONTINUATION frame changes the connection state as defined in . - - - - CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received - whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. - - - - A CONTINUATION frame MUST be preceded by a HEADERS, - PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A - recipient that observes violation of this rule MUST respond with a connection error of type - PROTOCOL_ERROR. - -
    -
    - -
    - - Error codes are 32-bit fields that are used in RST_STREAM and - GOAWAY frames to convey the reasons for the stream or connection error. - - - - Error codes share a common code space. Some error codes apply only to either streams or the - entire connection and have no defined semantics in the other context. - - - - The following error codes are defined: - - - The associated condition is not a result of an error. For example, a - GOAWAY might include this code to indicate graceful shutdown of a - connection. - - - The endpoint detected an unspecific protocol error. This error is for use when a more - specific error code is not available. - - - The endpoint encountered an unexpected internal error. - - - The endpoint detected that its peer violated the flow control protocol. - - - The endpoint sent a SETTINGS frame, but did not receive a response in a - timely manner. See Settings Synchronization. - - - The endpoint received a frame after a stream was half closed. - - - The endpoint received a frame with an invalid size. - - - The endpoint refuses the stream prior to performing any application processing, see - for details. - - - Used by the endpoint to indicate that the stream is no longer needed. - - - The endpoint is unable to maintain the header compression context for the connection. - - - The connection established in response to a CONNECT - request was reset or abnormally closed. - - - The endpoint detected that its peer is exhibiting a behavior that might be generating - excessive load. - - - The underlying transport has properties that do not meet minimum security - requirements (see ). - - - - - Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be - treated by an implementation as being equivalent to INTERNAL_ERROR. -
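For reference, a Go sketch declaring the codes listed above; the numeric values are the registered HTTP/2 error codes, which the prose above does not repeat, and the constant names are shortened for illustration:

    package main

    import "fmt"

    // The 32-bit error codes listed above. Unknown codes are tolerated and may
    // be treated as INTERNAL_ERROR.
    const (
        errNoError            = 0x0
        errProtocolError      = 0x1
        errInternalError      = 0x2
        errFlowControlError   = 0x3
        errSettingsTimeout    = 0x4
        errStreamClosed       = 0x5
        errFrameSizeError     = 0x6
        errRefusedStream      = 0x7
        errCancel             = 0x8
        errCompressionError   = 0x9
        errConnectError       = 0xa
        errEnhanceYourCalm    = 0xb
        errInadequateSecurity = 0xc
    )

    func main() {
        fmt.Printf("CANCEL = 0x%x\n", errCancel)
    }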
    - -
    - - HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means - that, from the application perspective, the features of the protocol are largely - unchanged. To achieve this, all request and response semantics are preserved, although the - syntax of conveying those semantics has changed. - - - Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax - and Routing , such as the HTTP and HTTPS URI schemes, are also - applicable in HTTP/2, but the expression of those semantics for this protocol is defined - in the sections below. - -
    - - A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on - the same stream as the request. - - - An HTTP message (request or response) consists of: - - - for a response only, zero or more HEADERS frames (each followed by zero - or more CONTINUATION frames) containing the message headers of - informational (1xx) HTTP responses (see and ), - and - - - one HEADERS frame (followed by zero or more CONTINUATION - frames) containing the message headers (see ), and - - - zero or more DATA frames containing the message payload (see ), and - - - optionally, one HEADERS frame, followed by zero or more - CONTINUATION frames containing the trailer-part, if present (see ). - - - The last frame in the sequence bears an END_STREAM flag, noting that a - HEADERS frame bearing the END_STREAM flag can be followed by - CONTINUATION frames that carry any remaining portions of the header block. - - - Other frames (from any stream) MUST NOT occur between either HEADERS frame - and any CONTINUATION frames that might follow. - - - - Trailing header fields are carried in a header block that also terminates the stream. - That is, a sequence starting with a HEADERS frame, followed by zero or more - CONTINUATION frames, where the HEADERS frame bears an - END_STREAM flag. Header blocks after the first that do not terminate the stream are not - part of an HTTP request or response. - - - A HEADERS frame (and associated CONTINUATION frames) can - only appear at the start or end of a stream. An endpoint that receives a - HEADERS frame without the END_STREAM flag set after receiving a final - (non-informational) status code MUST treat the corresponding request or response as malformed. - - - - An HTTP request/response exchange fully consumes a single stream. A request starts with - the HEADERS frame that puts the stream into an "open" state. The request - ends with a frame bearing END_STREAM, which causes the stream to become "half closed - (local)" for the client and "half closed (remote)" for the server. A response starts with - a HEADERS frame and ends with a frame bearing END_STREAM, which places the - stream in the "closed" state. - - - -
    - - HTTP/2 removes support for the 101 (Switching Protocols) informational status code - (). - - - The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. - Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate - their use (see ). - -
    - -
    - - HTTP header fields carry information as a series of key-value pairs. For a listing of - registered HTTP headers, see the Message Header Field Registry maintained at . - - -
    - - While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the - status code for the response, HTTP/2 uses special pseudo-header fields beginning with - the ':' character (ASCII 0x3a) for this purpose. - - - Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate - pseudo-header fields other than those defined in this document. - - - Pseudo-header fields are only valid in the context in which they are defined. - Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header - fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST - NOT appear in trailers. Endpoints MUST treat a request or response that contains - undefined or invalid pseudo-header fields as malformed. - - - Just as in HTTP/1.x, header field names are strings of ASCII characters that are - compared in a case-insensitive fashion. However, header field names MUST be converted - to lowercase prior to their encoding in HTTP/2. A request or response containing - uppercase header field names MUST be treated as malformed. - - - All pseudo-header fields MUST appear in the header block before regular header fields. - Any request or response that contains a pseudo-header field that appears in a header - block after a regular header field MUST be treated as malformed. -
    - -
    - - HTTP/2 does not use the Connection header field to - indicate connection-specific header fields; in this protocol, connection-specific - metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message - containing connection-specific header fields; any message containing - connection-specific header fields MUST be treated as malformed. - - - This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need - to remove any header fields nominated by the Connection header field, along with the - Connection header field itself. Such intermediaries SHOULD also remove other - connection-specific header fields, such as Keep-Alive, Proxy-Connection, - Transfer-Encoding and Upgrade, even if they are not nominated by Connection. - - - One exception to this is the TE header field, which MAY be present in an HTTP/2 - request, but when it is present it MUST NOT contain any value other than "trailers". - - - - - HTTP/2 purposefully does not support upgrade to another protocol. The handshake - methods described in are believed sufficient to - negotiate the use of alternative protocols. - - - -
    - -
    - - The following pseudo-header fields are defined for HTTP/2 requests: - - - - The :method pseudo-header field includes the HTTP - method (). - - - - - The :scheme pseudo-header field includes the scheme - portion of the target URI (). - - - :scheme is not restricted to http and https schemed URIs. A - proxy or gateway can translate requests for non-HTTP schemes, enabling the use - of HTTP to interact with non-HTTP services. - - - - - The :authority pseudo-header field includes the - authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http - or https schemed URIs. - - - To ensure that the HTTP/1.1 request line can be reproduced accurately, this - pseudo-header field MUST be omitted when translating from an HTTP/1.1 request - that has a request target in origin or asterisk form (see ). Clients that generate - HTTP/2 requests directly SHOULD use the :authority pseudo-header - field instead of the Host header field. An - intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by - copying the value of the :authority pseudo-header - field. - - - - - The :path pseudo-header field includes the path and - query parts of the target URI (the path-absolute - production from and optionally a '?' character - followed by the query production, see and ). A request in asterisk form includes the value '*' for the - :path pseudo-header field. - - - This pseudo-header field MUST NOT be empty for http - or https URIs; http or - https URIs that do not contain a path component - MUST include a value of '/'. The exception to this rule is an OPTIONS request - for an http or https - URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). - - - - - - All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory - pseudo-header fields is malformed. - - - HTTP/2 does not define a way to carry the version identifier that is included in the - HTTP/1.1 request line. - -
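A minimal Go sketch of deriving these pseudo-header fields from a parsed URI for a direct (non-CONNECT) request; the function name and the use of net/url are illustrative assumptions, not a prescribed mapping:

    package main

    import (
        "fmt"
        "net/url"
    )

    // requestPseudoHeaders derives :method, :scheme, :authority and :path for a
    // request a client generates directly. Pseudo-header fields must precede
    // regular header fields in the header block.
    func requestPseudoHeaders(method string, u *url.URL) [][2]string {
        path := u.RequestURI()
        if path == "" {
            path = "/" // http and https URIs without a path component use "/"
        }
        return [][2]string{
            {":method", method},
            {":scheme", u.Scheme},
            {":authority", u.Host}, // the userinfo subcomponent must not appear
            {":path", path},
        }
    }

    func main() {
        u, _ := url.Parse("https://example.org/resource?x=1")
        for _, h := range requestPseudoHeaders("GET", u) {
            fmt.Printf("%s = %s\n", h[0], h[1])
        }
    }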
    - -
    - - For HTTP/2 responses, a single :status pseudo-header - field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all - responses, otherwise the response is malformed. - - - HTTP/2 does not define a way to carry the version or reason phrase that is included in - an HTTP/1.1 status line. - -
    - -
    - - The Cookie header field can carry a significant amount of - redundant data. - - - The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). - This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from - being separated into different name-value pairs. This can significantly reduce - compression efficiency as individual cookie-pairs are updated. - - - To allow for better compression efficiency, the Cookie header field MAY be split into - separate header fields, each with one or more cookie-pairs. If there are multiple - Cookie header fields after decompression, these MUST be concatenated into a single - octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") - before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a - generic HTTP server application. - -
    - - Therefore, the following two lists of Cookie header fields are semantically - equivalent. - - -
    -
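A minimal Go sketch of splitting and re-joining cookie-pairs as described above; the function names and the sample cookie value are invented for illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitCookie breaks a Cookie header field into individual cookie-pairs so
    // that each pair can be compressed independently.
    func splitCookie(value string) []string {
        return strings.Split(value, "; ")
    }

    // joinCookies restores a single field, delimited by 0x3B 0x20 ("; "), before
    // the request is handed to a non-HTTP/2 context.
    func joinCookies(crumbs []string) string {
        return strings.Join(crumbs, "; ")
    }

    func main() {
        crumbs := splitCookie("a=b; c=d; e=f")
        fmt.Println(crumbs)              // [a=b c=d e=f]
        fmt.Println(joinCookies(crumbs)) // a=b; c=d; e=f
    }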
    - -
    - - A malformed request or response is one that is an otherwise valid sequence of HTTP/2 - frames, but is invalid due to the presence of extraneous frames, prohibited - header fields, the absence of mandatory header fields, or the inclusion of uppercase - header field names. - - - A request or response that includes an entity body can include a content-length header field. A request or response is also - malformed if the value of a content-length header field - does not equal the sum of the DATA frame payload lengths that form the - body. A response that is defined to have no payload, as described in , can have a non-zero - content-length header field, even though no content is - included in DATA frames. - - - Intermediaries that process HTTP requests or responses (i.e., any intermediary not - acting as a tunnel) MUST NOT forward a malformed request or response. Malformed - requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR. - - - For malformed requests, a server MAY send an HTTP response prior to closing or - resetting the stream. Clients MUST NOT accept a malformed response. Note that these - requirements are intended to protect against several types of common attacks against - HTTP; they are deliberately strict, because being permissive can expose - implementations to these vulnerabilities. -
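A sketch of the content-length check described above, in Go with invented names:

    package main

    import "fmt"

    // contentLengthMatches reports whether the content-length value equals the
    // sum of the DATA frame payload lengths that form the body; a mismatch makes
    // the message malformed.
    func contentLengthMatches(contentLength int64, dataPayloadLens []int) bool {
        var sum int64
        for _, n := range dataPayloadLens {
            sum += int64(n)
        }
        return sum == contentLength
    }

    func main() {
        fmt.Println(contentLengthMatches(123, []int{100, 23})) // true
        fmt.Println(contentLengthMatches(123, []int{100}))     // false: stream error of type PROTOCOL_ERROR
    }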
    -
    - -
    - - This section shows HTTP/1.1 requests and responses, with illustrations of equivalent - HTTP/2 requests and responses. - - - An HTTP GET request includes request header fields and no body and is therefore - transmitted as a single HEADERS frame, followed by zero or more - CONTINUATION frames containing the serialized block of request header - fields. The HEADERS frame in the following has both the END_HEADERS and - END_STREAM flags set; no CONTINUATION frames are sent: - - -
      HEADERS
        + END_STREAM
        + END_HEADERS
          :method = GET
          :scheme = https
          :path = /resource
          host = example.org
          accept = image/jpeg
    - - - Similarly, a response that includes only response header fields is transmitted as a - HEADERS frame (again, followed by zero or more - CONTINUATION frames) containing the serialized block of response header - fields. - - -
      HEADERS
        + END_STREAM
        + END_HEADERS
          :status = 304
          etag = "xyzzy"
          expires = Thu, 23 Jan ...
    - - - An HTTP POST request that includes request header fields and payload data is transmitted - as one HEADERS frame, followed by zero or more - CONTINUATION frames containing the request header fields, followed by one - or more DATA frames, with the last CONTINUATION (or - HEADERS) frame having the END_HEADERS flag set and the final - DATA frame having the END_STREAM flag set: - - -
      HEADERS
        - END_STREAM
        - END_HEADERS
          :method = POST
          :path = /resource
          :scheme = https

      CONTINUATION
        + END_HEADERS
          content-type = image/jpeg
          host = example.org
          content-length = 123

      DATA
        + END_STREAM
        {binary data}

    Note that data contributing to any given header field could be spread between header block fragments. The allocation of header fields to frames in this example is illustrative only.
    - - - A response that includes header fields and payload data is transmitted as a - HEADERS frame, followed by zero or more CONTINUATION - frames, followed by one or more DATA frames, with the last - DATA frame in the sequence having the END_STREAM flag set: - - -
      HEADERS
        - END_STREAM
        + END_HEADERS
          :status = 200
          content-type = image/jpeg
          content-length = 123

      DATA
        + END_STREAM
        {binary data}
    - - - Trailing header fields are sent as a header block after both the request or response - header block and all the DATA frames have been sent. The - HEADERS frame starting the trailers header block has the END_STREAM flag - set. - - -
      HEADERS
        - END_STREAM
        + END_HEADERS
          :status = 200
          content-length = 123
          content-type = image/jpeg
          trailer = Foo

      DATA
        - END_STREAM
        {binary data}

      HEADERS
        + END_STREAM
        + END_HEADERS
          foo = bar
    - - -
    An informational response using a 1xx status code other than 101 is transmitted as a HEADERS frame, followed by zero or more CONTINUATION frames:

      HEADERS
        - END_STREAM
        + END_HEADERS
          :status = 103
          extension-field = bar
    -
    - -
    - - In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error - occurs, because there is no means to determine the nature of the error. It is possible - that some server processing occurred prior to the error, which could result in - undesirable effects if the request were reattempted. - - - HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has - not been processed: - - - The GOAWAY frame indicates the highest stream number that might have - been processed. Requests on streams with higher numbers are therefore guaranteed to - be safe to retry. - - - The REFUSED_STREAM error code can be included in a - RST_STREAM frame to indicate that the stream is being closed prior to - any processing having occurred. Any request that was sent on the reset stream can - be safely retried. - - - - - Requests that have not been processed have not failed; clients MAY automatically retry - them, even those with non-idempotent methods. - - - A server MUST NOT indicate that a stream has not been processed unless it can guarantee - that fact. If frames that are on a stream are passed to the application layer for any - stream, then REFUSED_STREAM MUST NOT be used for that stream, and a - GOAWAY frame MUST include a stream identifier that is greater than or - equal to the given stream identifier. - - - In addition to these mechanisms, the PING frame provides a way for a - client to easily test a connection. Connections that remain idle can become broken as - some middleboxes (for instance, network address translators, or load balancers) silently - discard connection bindings. The PING frame allows a client to safely - test whether a connection is still active without sending a request. - -
    -
    - -
    - - HTTP/2 allows a server to pre-emptively send (or "push") responses (along with - corresponding "promised" requests) to a client in association with a previous - client-initiated request. This can be useful when the server knows the client will need - to have those responses available in order to fully process the response to the original - request. - - - - Pushing additional message exchanges in this fashion is optional, and is negotiated - between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set - to 0 to indicate that server push is disabled. - - - Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a - promised request that is not cacheable, that is unsafe, or that includes a request body MUST - reset the stream with a stream error of type - PROTOCOL_ERROR. - - - Pushed responses that are cacheable (see ) can be stored by the client, if it implements an HTTP - cache. Pushed responses are considered successfully validated on the origin server (e.g., - if the "no-cache" cache response directive is present) while the stream identified by the - promised stream ID is still open. - - - Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY - be made available to the application separately. - - - An intermediary can receive pushes from the server and choose not to forward them on to - the client. In other words, how to make use of the pushed information is up to that - intermediary. Equally, the intermediary might choose to make additional pushes to the - client, without any action taken by the server. - - - A client cannot push. Thus, servers MUST treat the receipt of a - PUSH_PROMISE frame as a connection - error of type PROTOCOL_ERROR. Clients MUST reject any attempt to - change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating - the message as a connection error of type - PROTOCOL_ERROR. - -
    - - Server push is semantically equivalent to a server responding to a request; however, in - this case that request is also sent by the server, as a PUSH_PROMISE - frame. - - - The PUSH_PROMISE frame includes a header block that contains a complete - set of request header fields that the server attributes to the request. It is not - possible to push a response to a request that includes a request body. - - - - Pushed responses are always associated with an explicit request from the client. The - PUSH_PROMISE frames sent by the server are sent on that explicit - request's stream. The PUSH_PROMISE frame also includes a promised stream - identifier, chosen from the stream identifiers available to the server (see ). - - - - The header fields in PUSH_PROMISE and any subsequent - CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in - the :method header field that is safe and cacheable. If a - client receives a PUSH_PROMISE that does not include a complete and valid - set of header fields, or the :method header field identifies - a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. - - - - The server SHOULD send PUSH_PROMISE () - frames prior to sending any frames that reference the promised responses. This avoids a - race where clients issue requests prior to receiving any PUSH_PROMISE - frames. - - - For example, if the server receives a request for a document containing embedded links - to multiple image files, and the server chooses to push those additional images to the - client, sending push promises before the DATA frames that contain the - image links ensures that the client is able to see the promises before discovering - embedded links. Similarly, if the server pushes responses referenced by the header block - (for instance, in Link header fields), sending the push promises before sending the - header block ensures that clients do not request them. - - - - PUSH_PROMISE frames MUST NOT be sent by the client. - - - PUSH_PROMISE frames can be sent by the server in response to any - client-initiated stream, but the stream MUST be in either the "open" or "half closed - (remote)" state with respect to the server. PUSH_PROMISE frames are - interspersed with the frames that comprise a response, though they cannot be - interspersed with HEADERS and CONTINUATION frames that - comprise a single header block. - - - Sending a PUSH_PROMISE frame creates a new stream and puts the stream - into the "reserved (local)" state for the server and the "reserved (remote)" state for - the client. -
    - -
    - - After sending the PUSH_PROMISE frame, the server can begin delivering the - pushed response as a response on a server-initiated - stream that uses the promised stream identifier. The server uses this stream to - transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" - to the client after the initial HEADERS frame is sent. - - - - Once a client receives a PUSH_PROMISE frame and chooses to accept the - pushed response, the client SHOULD NOT issue any requests for the promised response - until after the promised stream has closed. - - - - If the client determines, for any reason, that it does not wish to receive the pushed - response from the server, or if the server takes too long to begin sending the promised - response, the client can send an RST_STREAM frame, using either the - CANCEL or REFUSED_STREAM codes, and referencing the pushed - stream's identifier. - - - A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the - number of responses that can be concurrently pushed by a server. Advertising a - SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by - preventing the server from creating the necessary streams. This does not prohibit a - server from sending PUSH_PROMISE frames; clients need to reset any - promised streams that are not wanted. - - - - Clients receiving a pushed response MUST validate that either the server is - authoritative (see ), or the proxy that provided the pushed - response is configured for the corresponding request. For example, a server that offers - a certificate for only the example.com DNS-ID or Common Name - is not permitted to push a response for https://www.example.org/doc. - - - The response for a PUSH_PROMISE stream begins with a - HEADERS frame, which immediately puts the stream into the "half closed - (remote)" state for the server and "half closed (local)" state for the client, and ends - with a frame bearing END_STREAM, which places the stream in the "closed" state. - - - The client never sends a frame with the END_STREAM flag for a server push. - - - -
    - -
    - -
    - - In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. - CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin - server for the purposes of interacting with https resources. - - - In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to - a remote host, for similar purposes. The HTTP header field mapping works as defined in - Request Header Fields, with a few - differences. Specifically: - - - The :method header field is set to CONNECT. - - - The :scheme and :path header - fields MUST be omitted. - - - The :authority header field contains the host and port to - connect to (equivalent to the authority-form of the request-target of CONNECT - requests, see ). - - - - - A proxy that supports CONNECT establishes a TCP connection to - the server identified in the :authority header field. Once - this connection is successfully established, the proxy sends a HEADERS - frame containing a 2xx series status code to the client, as defined in . - - - After the initial HEADERS frame sent by each peer, all subsequent - DATA frames correspond to data sent on the TCP connection. The payload of - any DATA frames sent by the client is transmitted by the proxy to the TCP - server; data received from the TCP server is assembled into DATA frames by - the proxy. Frame types other than DATA or stream management frames - (RST_STREAM, WINDOW_UPDATE, and PRIORITY) - MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. - - - The TCP connection can be closed by either peer. The END_STREAM flag on a - DATA frame is treated as being equivalent to the TCP FIN bit. A client is - expected to send a DATA frame with the END_STREAM flag set after receiving - a frame bearing the END_STREAM flag. A proxy that receives a DATA frame - with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP - segment. A proxy that receives a TCP segment with the FIN bit set sends a - DATA frame with the END_STREAM flag set. Note that the final TCP segment - or DATA frame could be empty. - - - A TCP connection error is signaled with RST_STREAM. A proxy treats any - error in the TCP connection, which includes receiving a TCP segment with the RST bit set, - as a stream error of type - CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the - RST bit set if it detects an error with the stream or the HTTP/2 connection. - -
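A minimal Go sketch of the header block a client would send for such a CONNECT request, following the list above; the function name is invented for illustration:

    package main

    import "fmt"

    // connectHeaders builds the pseudo-header fields for an HTTP/2 CONNECT
    // request: :method is CONNECT, :authority carries host and port, and
    // :scheme and :path are omitted.
    func connectHeaders(hostPort string) [][2]string {
        return [][2]string{
            {":method", "CONNECT"},
            {":authority", hostPort},
        }
    }

    func main() {
        for _, h := range connectHeaders("example.org:443") {
            fmt.Printf("%s = %s\n", h[0], h[1])
        }
    }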
    -
    - -
    - - This section outlines attributes of the HTTP protocol that improve interoperability, reduce - exposure to known security vulnerabilities, or reduce the potential for implementation - variation. - - -
    - - HTTP/2 connections are persistent. For best performance, it is expected clients will not - close connections until it is determined that no further communication with a server is - necessary (for example, when a user navigates away from a particular web page), or until - the server closes the connection. - - - Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, - where host is derived from a URI, a selected alternative - service, or a configured proxy. - - - A client can create additional connections as replacements, either to replace connections - that are near to exhausting the available stream - identifier space, to refresh the keying material for a TLS connection, or to - replace connections that have encountered errors. - - - A client MAY open multiple connections to the same IP address and TCP port using different - Server Name Indication values or to provide different TLS - client certificates, but SHOULD avoid creating multiple connections with the same - configuration. - - - Servers are encouraged to maintain open connections for as long as possible, but are - permitted to terminate idle connections if necessary. When either endpoint chooses to - close the transport-layer TCP connection, the terminating endpoint SHOULD first send a - GOAWAY () frame so that both endpoints can reliably - determine whether previously sent frames have been processed and gracefully complete or - terminate any necessary remaining tasks. - - -
    - - Connections that are made to an origin server, either directly or through a tunnel - created using the CONNECT method, MAY be reused for - requests with multiple different URI authority components. A connection can be reused - as long as the origin server is authoritative. For - http resources, this depends on the host having resolved to - the same IP address. - - - For https resources, connection reuse additionally depends - on having a certificate that is valid for the host in the URI. An origin server might - offer a certificate with multiple subjectAltName attributes, - or names with wildcards, one of which is valid for the authority in the URI. For - example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for - requests to URIs starting with https://a.example.com/ and - https://b.example.com/. - - - In some deployments, reusing a connection for multiple origins can result in requests - being directed to the wrong origin server. For example, TLS termination might be - performed by a middlebox that uses the TLS Server Name Indication - (SNI) extension to select an origin server. This means that it is possible - for clients to send confidential information to servers that might not be the intended - target for the request, even though the server is otherwise authoritative. - - - A server that does not wish clients to reuse connections can indicate that it is not - authoritative for a request by sending a 421 (Misdirected Request) status code in response - to the request (see ). - - - A client that is configured to use a proxy over HTTP/2 directs requests to that proxy - through a single connection. That is, all requests sent via a proxy reuse the - connection to the proxy. -
    - -
    - - The 421 (Misdirected Request) status code indicates that the request was directed at a - server that is not able to produce a response. This can be sent by a server that is not - configured to produce responses for the combination of scheme and authority that are - included in the request URI. - - - Clients receiving a 421 (Misdirected Request) response from a server MAY retry the - request - whether the request method is idempotent or not - over a different connection. - This is possible if a connection is reused () or if an alternative - service is selected (). - - - This status code MUST NOT be generated by proxies. - - - A 421 response is cacheable by default; i.e., unless otherwise indicated by the method - definition or explicit cache controls (see ). - -
    -
    - -
    - - Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over - TLS. The general TLS usage guidance in SHOULD be followed, with - some additional restrictions that are specific to HTTP/2. - - - - An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on - feature set and cipher suite described in this section. Due to implementation - limitations, it might not be possible to fail TLS negotiation. An endpoint MUST - immediately terminate an HTTP/2 connection that does not meet these minimum requirements - with a connection error of type - INADEQUATE_SECURITY. - - -
    - - The TLS implementation MUST support the Server Name Indication - (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when - negotiating TLS. - - - The TLS implementation MUST disable compression. TLS compression can lead to the - exposure of information that would not otherwise be revealed . - Generic compression is unnecessary since HTTP/2 provides compression features that are - more aware of context and therefore likely to be more appropriate for use for - performance, security or other reasons. - - - The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS - renegotiation as a connection error of type - PROTOCOL_ERROR. Note that disabling renegotiation can result in - long-lived connections becoming unusable due to limits on the number of messages the - underlying cipher suite can encipher. - - - A client MAY use renegotiation to provide confidentiality protection for client - credentials offered in the handshake, but any renegotiation MUST occur prior to sending - the connection preface. A server SHOULD request a client certificate if it sees a - renegotiation request immediately after establishing a connection. - - - This effectively prevents the use of renegotiation in response to a request for a - specific protected resource. A future specification might provide a way to support this - use case. - -
    - -
    - - The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST - only be used with cipher suites that have ephemeral key exchange, such as the ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST - have a minimum size of 2048 bits for DHE or a security level of 128 bits for ECDHE. - Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher - suites that use stream or block ciphers. Authenticated Encryption with Additional Data - (AEAD) modes, such as the Galois Counter Mode (GCM) for - AES, are acceptable. - - - The effect of these restrictions is that TLS 1.2 implementations could have - non-intersecting sets of available cipher suites, since these prevent the use of the - cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of - HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256 . - - - Clients MAY advertise support of cipher suites that are prohibited by the above - restrictions in order to allow for connection to servers that do not support HTTP/2. - This enables a fallback to protocols without these constraints without the additional - latency imposed by using a separate connection for fallback. -
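As a hedged sketch of what these requirements look like in Go's crypto/tls (this is not taken from the vendored code): TLS 1.2 as a minimum version, the mandatory ECDHE/AES-GCM suite, and "h2" offered through ALPN. The dial target is a placeholder. Go's crypto/tls never offers TLS-level compression, which lines up with the prohibition above.

package main

import (
	"crypto/tls"
	"log"
)

func main() {
	cfg := &tls.Config{
		MinVersion: tls.VersionTLS12, // HTTP/2 over TLS requires at least TLS 1.2
		CipherSuites: []uint16{
			// The suite the text above makes mandatory for TLS 1.2 deployments.
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		},
		NextProtos: []string{"h2"}, // ALPN identifier for HTTP/2 over TLS
	}

	conn, err := tls.Dial("tcp", "example.com:443", cfg) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Printf("negotiated protocol: %q", conn.ConnectionState().NegotiatedProtocol)
}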
    -
    -
    - -
    -
    - - HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is - authoritative in providing a given response, see . This relies on local name resolution for the "http" - URI scheme, and the authenticated server identity for the "https" scheme (see ). - -
    - -
    - - In a cross-protocol attack, an attacker causes a client to initiate a transaction in one - protocol toward a server that understands a different protocol. An attacker might be able - to cause the transaction to appear as a valid transaction in the second protocol. In - combination with the capabilities of the web context, this can be used to interact with - poorly protected servers in private networks. - - - Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient - protection against cross-protocol attacks. ALPN provides a positive indication that a - server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based - protocols. - - - The encryption in TLS makes it difficult for attackers to control the data that could be - used in a cross-protocol attack on a cleartext protocol. - - - The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks. - The connection preface contains a string that is - designed to confuse HTTP/1.1 servers, but no special protection is offered for other - protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an - Upgrade header field in addition to the client connection preface could be exposed to a - cross-protocol attack. -
    - -
    - - HTTP/2 header field names and values are encoded as sequences of octets with a length - prefix. This enables HTTP/2 to carry any string of octets as the name or value of a - header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 - directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might - exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal - header fields, extra header fields, or even new messages that are entirely falsified. - - - Header field names or values that contain characters not permitted by HTTP/1.1, including - carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an - intermediary, as stipulated in . - - - Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker. - Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. - -
    - -
    - - Pushed responses do not have an explicit request from the client; the request - is provided by the server in the PUSH_PROMISE frame. - - - Caching responses that are pushed is possible based on the guidance provided by the origin - server in the Cache-Control header field. However, this can cause issues if a single - server hosts more than one tenant. For example, a server might offer multiple users each - a small portion of its URI space. - - - Where multiple tenants share space on the same server, that server MUST ensure that - tenants are not able to push representations of resources that they do not have authority - over. Failure to enforce this would allow a tenant to provide a representation that would - be served out of cache, overriding the actual representation that the authoritative tenant - provides. - - - Pushed responses for which an origin server is not authoritative (see - ) are never cached or used. - -
    - -
    - - An HTTP/2 connection can demand a greater commitment of resources to operate than an - HTTP/1.1 connection. The use of header compression and flow control depends on a - commitment of resources for storing a greater amount of state. Settings for these - features ensure that memory commitments for these features are strictly bounded. - - - The number of PUSH_PROMISE frames is not constrained in the same fashion. - A client that accepts server push SHOULD limit the number of streams it allows to be in - the "reserved (remote)" state. An excessive number of server push streams can be treated as - a stream error of type - ENHANCE_YOUR_CALM. - - - Processing capacity cannot be guarded as effectively as state capacity. - - - The SETTINGS frame can be abused to cause a peer to expend additional - processing time. This might be done by pointlessly changing SETTINGS parameters, setting - multiple undefined parameters, or changing the same setting multiple times in the same - frame. WINDOW_UPDATE or PRIORITY frames can be abused to - cause an unnecessary waste of resources. - - - Large numbers of small or empty frames can be abused to cause a peer to expend time - processing frame headers. Note, however, that some uses are entirely legitimate, such as - the sending of an empty DATA frame to end a stream. - - - Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses. - - - Limits in SETTINGS parameters cannot be reduced instantaneously, which - leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In - particular, immediately after establishing a connection, limits set by a server are not - known to clients and could be exceeded without being an obvious protocol violation. - - - All these features - i.e., SETTINGS changes, small frames, header - compression - have legitimate uses. These features become a burden only when they are - used unnecessarily or to excess. - - - An endpoint that doesn't monitor this behavior exposes itself to a risk of denial-of-service - attack. Implementations SHOULD track the use of these features and set limits on - their use. An endpoint MAY treat activity that is suspicious as a connection error of type - ENHANCE_YOUR_CALM. - -
    - - A large header block can cause an implementation to - commit a large amount of state. Header fields that are critical for routing can appear - toward the end of a header block, which prevents streaming of header fields to their - ultimate destination. For this and other reasons, such as ensuring cache correctness, - an endpoint might need to buffer the entire header block. Since there is no - hard limit to the size of a header block, some endpoints could be forced to commit a large - amount of available memory for header fields. - - - An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE setting to advise peers of - limits that might apply on the size of header blocks. This setting is only advisory, so - endpoints MAY choose to send header blocks that exceed this limit and risk having the - request or response treated as malformed. This setting is specific to a connection, - so any request or response could encounter a hop with a lower, unknown limit. An - intermediary can attempt to avoid this problem by passing on values presented by - different peers, but they are not obligated to do so. - - - A server that receives a larger header block than it is willing to handle can send an - HTTP 431 (Request Header Fields Too Large) status code . A - client can discard responses that it cannot process. The header block MUST be processed - to ensure a consistent connection state, unless the connection is closed. -
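The vendored golang.org/x/net/http2 Transport that this patch removes later exposes exactly this advisory limit as MaxHeaderListSize (advertised via SETTINGS_MAX_HEADER_LIST_SIZE). A minimal usage sketch; the 1 MiB value and the URL are arbitrary placeholders, not recommendations.

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Advertise SETTINGS_MAX_HEADER_LIST_SIZE; peers that send larger header
	// blocks risk having the request or response treated as malformed.
	t := &http2.Transport{
		MaxHeaderListSize: 1 << 20, // 1 MiB, an arbitrary example value
		TLSClientConfig:   &tls.Config{MinVersion: tls.VersionTLS12},
	}
	client := &http.Client{Transport: t}

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Proto, resp.Status)
}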
    -
    - -
    - - HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover - secret data when it is compressed in the same context as data under attacker control. - - - There are demonstrable attacks on compression that exploit the characteristics of the web - (e.g., ). The attacker induces multiple requests containing - varying plaintext, observing the length of the resulting ciphertext in each, which - reveals a shorter length when a guess about the secret is correct. - - - Implementations communicating on a secure channel MUST NOT compress content that includes - both confidential and attacker-controlled data unless separate compression dictionaries - are used for each source of data. Compression MUST NOT be used if the source of data - cannot be reliably determined. Generic stream compression, such as that provided by TLS - MUST NOT be used with HTTP/2 (). - - - Further considerations regarding the compression of header fields are described in . - -
    - -
    - - Padding within HTTP/2 is not intended as a replacement for general purpose padding, such - as might be provided by TLS. Redundant padding could even be - counterproductive. Correct application can depend on having specific knowledge of the - data that is being padded. - - - To mitigate attacks that rely on compression, disabling or limiting compression might be - preferable to padding as a countermeasure. - - - Padding can be used to obscure the exact size of frame content, and is provided to - mitigate specific attacks within HTTP. For example, attacks where compressed content - includes both attacker-controlled plaintext and secret data (see for example, ). - - - Use of padding can result in less protection than might seem immediately obvious. At - best, padding only makes it more difficult for an attacker to infer length information by - increasing the number of frames an attacker has to observe. Incorrectly implemented - padding schemes can be easily defeated. In particular, randomized padding with a - predictable distribution provides very little protection; similarly, padding payloads to a - fixed size exposes information as payload sizes cross the fixed size boundary, which could - be possible if an attacker can control plaintext. - - - Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding - for HEADERS and PUSH_PROMISE frames. A valid reason for an - intermediary to change the amount of padding of frames is to improve the protections that - padding provides. - -
    - -
    - - Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions - of a single client or server over time. This includes the value of settings, the manner - in which flow control windows are managed, the way priorities are allocated to streams, - timing of reactions to stimulus, and handling of any optional features. - - - As far as this creates observable differences in behavior, they could be used as a basis - for fingerprinting a specific client, as defined in . - -
    -
    - -
    - - A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation - (ALPN) Protocol IDs" registry established in . - - - This document establishes a registry for frame types, settings, and error codes. These new - registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. - - - This document registers the HTTP2-Settings header field for - use in HTTP; and the 421 (Misdirected Request) status code. - - - This document registers the PRI method for use in HTTP, to avoid - collisions with the connection preface. - - -
    - - This document creates two registrations for the identification of HTTP/2 in the - "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . - - - The "h2" string identifies HTTP/2 when used over TLS: - - HTTP/2 over TLS - 0x68 0x32 ("h2") - This document - - - - The "h2c" string identifies HTTP/2 when used over cleartext TCP: - - HTTP/2 over TCP - 0x68 0x32 0x63 ("h2c") - This document - - -
    - -
    - - This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame - Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under - either of the "IETF Review" or "IESG Approval" policies for - values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for - experimental use. - - - New entries in this registry require the following information: - - - A name or label for the frame type. - - - The 8-bit code assigned to the frame type. - - - A reference to a specification that includes a description of the frame layout, - its semantics, and the flags that the frame type uses, including any parts of the frame - that are conditionally present based on the value of flags. - - - - - The entries in the following table are registered by this document. - - - Frame Type - Code - Section - DATA 0x0 - HEADERS 0x1 - PRIORITY 0x2 - RST_STREAM 0x3 - SETTINGS 0x4 - PUSH_PROMISE 0x5 - PING 0x6 - GOAWAY 0x7 - WINDOW_UPDATE 0x8 - CONTINUATION 0x9 -
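For orientation only: the frame types in this registry surface in the vendored golang.org/x/net/http2 package as distinct frame structs returned by Framer.ReadFrame. A sketch of dispatching on them, assuming conn already carries an established HTTP/2 connection; the package name h2frames is made up.

package h2frames

import (
	"io"

	"golang.org/x/net/http2"
)

// readFrames dispatches on the frame types listed in the registry above.
// conn is assumed to already carry an established HTTP/2 connection
// (preface and initial SETTINGS exchanged elsewhere).
func readFrames(conn io.ReadWriter) error {
	fr := http2.NewFramer(conn, conn)
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return err
		}
		switch f := f.(type) {
		case *http2.DataFrame: // 0x0
			_ = f.Data()
		case *http2.HeadersFrame: // 0x1
		case *http2.PriorityFrame: // 0x2
		case *http2.RSTStreamFrame: // 0x3
		case *http2.SettingsFrame: // 0x4
		case *http2.PushPromiseFrame: // 0x5
		case *http2.PingFrame: // 0x6
		case *http2.GoAwayFrame: // 0x7
		case *http2.WindowUpdateFrame: // 0x8
		case *http2.ContinuationFrame: // 0x9
		}
	}
}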
    - -
    - - This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry - manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to - 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use. - - - New registrations are advised to provide the following information: - - - A symbolic name for the setting. Specifying a setting name is optional. - - - The 16-bit code assigned to the setting. - - - An initial value for the setting. - - - An optional reference to a specification that describes the use of the setting. - - - - - An initial set of setting registrations can be found in . - - - Name - Code - Initial Value - Specification - HEADER_TABLE_SIZE - 0x1 4096 - ENABLE_PUSH - 0x2 1 - MAX_CONCURRENT_STREAMS - 0x3 (infinite) - INITIAL_WINDOW_SIZE - 0x4 65535 - MAX_FRAME_SIZE - 0x5 16384 - MAX_HEADER_LIST_SIZE - 0x6 (infinite) - - -
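The initial values in this table correspond to the Setting IDs exposed by the vendored golang.org/x/net/http2 package (the removed transport.go writes its own initial settings the same way, via Framer.WriteSettings). A sketch that mirrors the registry defaults; it is illustrative, not configuration advice, and the package name h2settings is made up.

package h2settings

import "golang.org/x/net/http2"

// defaultSettings mirrors the initial values registered in the table above.
// MAX_CONCURRENT_STREAMS and MAX_HEADER_LIST_SIZE default to "infinite", so
// they are simply not advertised here.
var defaultSettings = []http2.Setting{
	{ID: http2.SettingHeaderTableSize, Val: 4096},    // 0x1
	{ID: http2.SettingEnablePush, Val: 1},            // 0x2
	{ID: http2.SettingInitialWindowSize, Val: 65535}, // 0x4
	{ID: http2.SettingMaxFrameSize, Val: 16384},      // 0x5
}

// writeInitialSettings sends the SETTINGS frame that opens this side of the
// connection, using the registry defaults above.
func writeInitialSettings(fr *http2.Framer) error {
	return fr.WriteSettings(defaultSettings...)
}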
    - -
    - - This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code" - registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the - "Expert Review" policy. - - - Registrations for error codes are required to include a description of the error code. An - expert reviewer is advised to examine new registrations for possible duplication with - existing error codes. Use of existing registrations is to be encouraged, but not - mandated. - - - New registrations are advised to provide the following information: - - - A name for the error code. Specifying an error code name is optional. - - - The 32-bit error code value. - - - A brief description of the error code semantics, longer if no detailed specification - is provided. - - - An optional reference for a specification that defines the error code. - - - - - The entries in the following table are registered by this document. - - - Name - Code - Description - Specification - NO_ERROR 0x0 - Graceful shutdown - - PROTOCOL_ERROR 0x1 - Protocol error detected - - INTERNAL_ERROR 0x2 - Implementation fault - - FLOW_CONTROL_ERROR 0x3 - Flow control limits exceeded - - SETTINGS_TIMEOUT 0x4 - Settings not acknowledged - - STREAM_CLOSED 0x5 - Frame received for closed stream - - FRAME_SIZE_ERROR 0x6 - Frame size incorrect - - REFUSED_STREAM 0x7 - Stream not processed - - CANCEL 0x8 - Stream cancelled - - COMPRESSION_ERROR 0x9 - Compression state not updated - - CONNECT_ERROR 0xa - TCP connection error for CONNECT method - - ENHANCE_YOUR_CALM 0xb - Processing capacity exceeded - - INADEQUATE_SECURITY 0xc - Negotiated TLS parameters not acceptable - - - -
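The same codes appear in the vendored package as ErrCode constants (the removed transport.go already relies on ErrCodeCancel, ErrCodeRefusedStream, and ErrCodeNo). A sketch of a retry predicate keyed on two of them; which codes are worth retrying is an application decision, and the package name h2errors is made up.

package h2errors

import "golang.org/x/net/http2"

// retryableStreamError reports whether a stream-level error carries a code
// for which resending the request on another stream or connection is
// plausible. The selection is illustrative, not normative.
func retryableStreamError(err error) bool {
	se, ok := err.(http2.StreamError)
	if !ok {
		return false
	}
	switch se.Code {
	case http2.ErrCodeRefusedStream: // 0x7: the stream was not processed at all
		return true
	case http2.ErrCodeEnhanceYourCalm: // 0xb: the peer asked us to back off
		return true
	default:
		return false
	}
}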
    - -
    - - This section registers the HTTP2-Settings header field in the - Permanent Message Header Field Registry. - - - HTTP2-Settings - - - http - - - standard - - - IETF - - - of this document - - - This header field is only used by an HTTP/2 client for Upgrade-based negotiation. - - - -
    - -
    - - This section registers the PRI method in the HTTP Method - Registry (). - - - PRI - - - No - - - No - - - of this document - - - This method is never used by an actual client. This method will appear to be used - when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection - preface. - - - -
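The request an HTTP/1.1 parser would "see" is just the fixed client connection preface, which the vendored package exports as http2.ClientPreface. A sketch of the check a cleartext (h2c) server might perform on a freshly accepted connection; the package name h2preface is made up.

package h2preface

import (
	"fmt"
	"io"

	"golang.org/x/net/http2"
)

// expectPreface consumes and verifies the fixed client connection preface
// ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"), which an HTTP/1.1 parser would
// misread as a request using the reserved PRI method.
func expectPreface(r io.Reader) error {
	buf := make([]byte, len(http2.ClientPreface))
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	if string(buf) != http2.ClientPreface {
		return fmt.Errorf("bogus connection preface %q", buf)
	}
	return nil
}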
    - -
    - - This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext - Transfer Protocol (HTTP) Status Code Registry (). - - - - - 421 - - - Misdirected Request - - - of this document - - - -
    - -
    - -
    - - This document includes substantial input from the following individuals: - - - Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin - Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin - Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY - contributors). - - - Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). - - - William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto - Peon, Rob Trace (Flow control). - - - Mike Bishop (Extensibility). - - - Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan - (Substantial editorial contributions). - - - Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. - - - Alexey Melnikov was an editor of this document during 2013. - - - A substantial proportion of Martin's contribution was supported by Microsoft during his - employment there. - - - -
    -
    - - - - - - HPACK - Header Compression for HTTP/2 - - - - - - - - - - - - Transmission Control Protocol - - - University of Southern California (USC)/Information Sciences - Institute - - - - - - - - - - - Key words for use in RFCs to Indicate Requirement Levels - - - Harvard University -
    sob@harvard.edu
    -
    - -
    - - -
    - - - - - HTTP Over TLS - - - - - - - - - - Uniform Resource Identifier (URI): Generic - Syntax - - - - - - - - - - - - The Base16, Base32, and Base64 Data Encodings - - - - - - - - - Guidelines for Writing an IANA Considerations Section in RFCs - - - - - - - - - - - Augmented BNF for Syntax Specifications: ABNF - - - - - - - - - - - The Transport Layer Security (TLS) Protocol Version 1.2 - - - - - - - - - - - Transport Layer Security (TLS) Extensions: Extension Definitions - - - - - - - - - - Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension - - - - - - - - - - - - - TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois - Counter Mode (GCM) - - - - - - - - - - - Digital Signature Standard (DSS) - - NIST - - - - - - - - - Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing - - Adobe Systems Incorporated -
    fielding@gbiv.com
    -
    - - greenbytes GmbH -
    julian.reschke@greenbytes.de
    -
    - -
    - - -
    - - - - Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content - - Adobe Systems Incorporated -
    fielding@gbiv.com
    -
    - - greenbytes GmbH -
    julian.reschke@greenbytes.de
    -
    - -
    - - -
    - - - Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests - - Adobe Systems Incorporated -
    fielding@gbiv.com
    -
    - - greenbytes GmbH -
    julian.reschke@greenbytes.de
    -
    - -
    - -
    - - - Hypertext Transfer Protocol (HTTP/1.1): Range Requests - - Adobe Systems Incorporated -
    fielding@gbiv.com
    -
    - - World Wide Web Consortium -
    ylafon@w3.org
    -
    - - greenbytes GmbH -
    julian.reschke@greenbytes.de
    -
    - -
    - -
    - - - Hypertext Transfer Protocol (HTTP/1.1): Caching - - Adobe Systems Incorporated -
    fielding@gbiv.com
    -
    - - Akamai -
    mnot@mnot.net
    -
    - - greenbytes GmbH -
    julian.reschke@greenbytes.de
    -
    - -
    - - -
    - - - Hypertext Transfer Protocol (HTTP/1.1): Authentication - - Adobe Systems Incorporated -
    fielding@gbiv.com
    -
    - - greenbytes GmbH -
    julian.reschke@greenbytes.de
    -
    - -
    - - -
    - - - - HTTP State Management Mechanism - - - - - -
    - - - - - - TCP Extensions for High Performance - - - - - - - - - - - - Transport Layer Security Protocol Compression Methods - - - - - - - - - Additional HTTP Status Codes - - - - - - - - - - - Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) - - - - - - - - - - - - - - - AES Galois Counter Mode (GCM) Cipher Suites for TLS - - - - - - - - - - - - HTML5 - - - - - - - - - - - Latest version available at - . - - - - - - - Talking to Yourself for Fun and Profit - - - - - - - - - - - - - - BREACH: Reviving the CRIME Attack - - - - - - - - - - - Registration Procedures for Message Header Fields - - Nine by Nine -
    GK-IETF@ninebynine.org
    -
    - - BEA Systems -
    mnot@pobox.com
    -
    - - HP Labs -
    JeffMogul@acm.org
    -
    - -
    - - -
    - - - - Recommendations for Secure Use of TLS and DTLS - - - - - - - - - - - - - - - - - - HTTP Alternative Services - - - Akamai - - - Mozilla - - - greenbytes - - - - - - -
    - -
    - - This section is to be removed by RFC Editor before publication. - - -
    - - Renamed Not Authoritative status code to Misdirected Request. - -
    - -
    - - Pseudo-header fields are now required to appear strictly before regular ones. - - - Restored 1xx series status codes, except 101. - - - Changed frame length field 24-bits. Expanded frame header to 9 octets. Added a setting - to limit the damage. - - - Added a setting to advise peers of header set size limits. - - - Removed segments. - - - Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. - -
    - -
    - - Restored extensibility options. - - - Restricting TLS cipher suites to AEAD only. - - - Removing Content-Encoding requirements. - - - Permitting the use of PRIORITY after stream close. - - - Removed ALTSVC frame. - - - Removed BLOCKED frame. - - - Reducing the maximum padding size to 256 octets; removing padding from - CONTINUATION frames. - - - Removed per-frame GZIP compression. - -
    - -
    - - Added BLOCKED frame (at risk). - - - Simplified priority scheme. - - - Added DATA per-frame GZIP compression. - -
    - -
    - - Changed "connection header" to "connection preface" to avoid confusion. - - - Added dependency-based stream prioritization. - - - Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. - - - Adding missing padding to PUSH_PROMISE. - - - Integrate ALTSVC frame and supporting text. - - - Dropping requirement on "deflate" Content-Encoding. - - - Improving security considerations around use of compression. - -
    - -
    - - Adding padding for data frames. - - - Renumbering frame types, error codes, and settings. - - - Adding INADEQUATE_SECURITY error code. - - - Updating TLS usage requirements to 1.2; forbidding TLS compression. - - - Removing extensibility for frames and settings. - - - Changing setting identifier size. - - - Removing the ability to disable flow control. - - - Changing the protocol identification token to "h2". - - - Changing the use of :authority to make it optional and to allow userinfo in non-HTTP - cases. - - - Allowing split on 0x0 for Cookie. - - - Reserved PRI method in HTTP/1.1 to avoid possible future collisions. - -
    - -
    - - Added cookie crumbling for more efficient header compression. - - - Added header field ordering with the value-concatenation mechanism. - -
    - -
    - - Marked draft for implementation. - -
    - -
    - - Adding definition for CONNECT method. - - - Constraining the use of push to safe, cacheable methods with no request body. - - - Changing from :host to :authority to remove any potential confusion. - - - Adding setting for header compression table size. - - - Adding settings acknowledgement. - - - Removing unnecessary and potentially problematic flags from CONTINUATION. - - - Added denial of service considerations. - -
    -
    - - Marking the draft ready for implementation. - - - Renumbering END_PUSH_PROMISE flag. - - - Editorial clarifications and changes. - -
    - -
    - - Added CONTINUATION frame for HEADERS and PUSH_PROMISE. - - - PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is - zero. - - - Push expanded to allow all safe methods without a request body. - - - Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 - hop-by-hop header fields. - - - Requiring that intermediaries not forward requests with missing or illegal routing - :-headers. - - - Clarified requirements around handling different frames after stream close, stream reset - and GOAWAY. - - - Added more specific prohibitions for sending of different frame types in various stream - states. - - - Making the last received setting value the effective value. - - - Clarified requirements on TLS version, extension and ciphers. - -
    - -
    - - Committed major restructuring atrocities. - - - Added reference to first header compression draft. - - - Added more formal description of frame lifecycle. - - - Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. - - - Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. - - - Added PRIORITY frame. - -
    - -
    - - Added continuations to frames carrying header blocks. - - - Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful - concepts, like cookies. - - - Removed "message". - - - Switched to TLS ALPN from NPN. - - - Editorial changes. - -
    - -
    - - Added IANA considerations section for frame types, error codes and settings. - - - Removed data frame compression. - - - Added PUSH_PROMISE. - - - Added globally applicable flags to framing. - - - Removed zlib-based header compression mechanism. - - - Updated references. - - - Clarified stream identifier reuse. - - - Removed CREDENTIALS frame and associated mechanisms. - - - Added advice against naive implementation of flow control. - - - Added session header section. - - - Restructured frame header. Removed distinction between data and control frames. - - - Altered flow control properties to include session-level limits. - - - Added note on cacheability of pushed resources and multiple tenant servers. - - - Changed protocol label form based on discussions. - -
    - -
    - - Changed title throughout. - - - Removed section on Incompatibilities with SPDY draft#2. - - - Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . - - - Replaced abstract and introduction. - - - Added section on starting HTTP/2.0, including upgrade mechanism. - - - Removed unused references. - - - Added flow control principles based on . - -
    - -
    - - Adopted as base for draft-ietf-httpbis-http2. - - - Updated authors/editors list. - - - Added status note. - -
    -
    - -
    -
    - diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index adb77ffa..00000000 --- a/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,2275 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/rand" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math" - mathrand "math/rand" - "net" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. - ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // AllowHTTP, if true, permits HTTP/2 requests using the insecure, - // plain-text "http" scheme. Note that this does not enable h2c support. - AllowHTTP bool - - // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to - // send in the initial settings frame. It is how many bytes - // of response headers are allowed. Unlike the http2 spec, zero here - // means to use a default limit (currently 10MB). If you actually - // want to advertise an ulimited value to the peer, Transport - // interprets the highest possible value here (0xffffffff or 1<<32-1) - // to mean no limit. - MaxHeaderListSize uint32 - - // t1, if non-nil, is the standard library Transport using - // this transport. Its settings are used (but not its - // RoundTrip method, etc). 
- t1 *http.Transport - - connPoolOnce sync.Once - connPoolOrDef ClientConnPool // non-nil version of ConnPool -} - -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 - } - if t.MaxHeaderListSize == 0xffffffff { - return 0 - } - return t.MaxHeaderListSize -} - -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) -} - -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - -// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. -func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go - return err -} - -func (t *Transport) connPool() ClientConnPool { - t.connPoolOnce.Do(t.initConnPool) - return t.connPoolOrDef -} - -func (t *Transport) initConnPool() { - if t.ConnPool != nil { - t.connPoolOrDef = t.ConnPool - } else { - t.connPoolOrDef = &clientConnPool{t: t} - } -} - -// ClientConn is the state of a single HTTP/2 client connection to an -// HTTP/2 server. -type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - singleUse bool // whether being used for a single http.Request - - // readLoop goroutine fields: - readerDone chan struct{} // closed on error - readerErr error // set before readerDone is closed - - idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 - - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred -} - -// clientStream is the state for a single HTTP/2 stream. One of these -// is created for each Transport.RoundTrip call. 
-type clientStream struct { - cc *ClientConn - req *http.Request - trace *clientTrace // or nil - ID uint32 - resc chan resAndError - bufPipe pipe // buffered pipe with the flow-controlled response payload - startedWrite bool // started request body write; guarded by cc.mu - requestedGzip bool - on100 func() // optional code to run if get a 100 continue response - - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu - - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed - - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu - - // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - - trailer http.Header // accumulated trailers - resTrailer *http.Header // client's Response.Trailer -} - -// awaitRequestCancel waits for the user to cancel a request or for the done -// channel to be signaled. A non-nil error is returned only if the request was -// canceled. -func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { - ctx := reqContext(req) - if req.Cancel == nil && ctx.Done() == nil { - return nil - } - select { - case <-req.Cancel: - return errRequestCanceled - case <-ctx.Done(): - return ctx.Err() - case <-done: - return nil - } -} - -// awaitRequestCancel waits for the user to cancel a request, its context to -// expire, or for the request to be done (any way it might be removed from the -// cc.streams map: peer reset, successful completion, TCP connection breakage, -// etc). If the request is canceled, then cs will be canceled and closed. -func (cs *clientStream) awaitRequestCancel(req *http.Request) { - if err := awaitRequestCancel(req, cs.done); err != nil { - cs.cancelStream() - cs.bufPipe.CloseWithError(err) - } -} - -func (cs *clientStream) cancelStream() { - cc := cs.cc - cc.mu.Lock() - didReset := cs.didReset - cs.didReset = true - cc.mu.Unlock() - - if !didReset { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - cc.forgetStreamID(cs.ID) - } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. -func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil - } -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() -} - -type stickyErrWriter struct { - w io.Writer - err *error -} - -func (sew stickyErrWriter) Write(p []byte) (n int, err error) { - if *sew.err != nil { - return 0, *sew.err - } - n, err = sew.w.Write(p) - *sew.err = err - return -} - -var ErrNoCachedConn = errors.New("http2: no cached connection was available") - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether RoundTripOpt may - // create a new TCP connection. 
If set true and - // no cached connection is available, RoundTripOpt - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.RoundTripOpt(req, RoundTripOpt{}) -} - -// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed. -func authorityAddr(scheme string, authority string) (addr string) { - host, port, err := net.SplitHostPort(authority) - if err != nil { // authority didn't have a port - port = "443" - if scheme == "http" { - port = "80" - } - host = authority - } - if a, err := idna.ToASCII(host); err == nil { - host = a - } - // IPv6 address literal, without a port: - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - return host + ":" + port - } - return net.JoinHostPort(host, port) -} - -// RoundTripOpt is like RoundTrip, but takes options. -func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for retry := 0; ; retry++ { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - traceGotConn(req, cc) - res, err := cc.RoundTrip(req) - if err != nil && retry <= 6 { - afterBodyWrite := false - if e, ok := err.(afterReqBodyWriteError); ok { - err = e - afterBodyWrite = true - } - if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil { - // After the first retry, do exponential backoff with 10% jitter. - if retry == 0 { - continue - } - backoff := float64(uint(1) << (uint(retry) - 1)) - backoff += backoff * (0.1 * mathrand.Float64()) - select { - case <-time.After(time.Second * time.Duration(backoff)): - continue - case <-reqContext(req).Done(): - return nil, reqContext(req).Err() - } - } - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") -) - -// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip. -// It is used to signal that err happened after part of Request.Body was sent to the server. -type afterReqBodyWriteError struct { - err error -} - -func (e afterReqBodyWriteError) Error() string { - return e.err.Error() + "; some request body already written" -} - -// shouldRetryRequest is called by RoundTrip when a request fails to get -// response headers. It is always called with a non-nil error. -// It returns either a request to retry (either the same request, or a -// modified clone), or an error if the request can't be replayed. 
-func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { - if !canRetryError(err) { - return nil, err - } - if !afterBodyWrite { - return req, nil - } - // If the Body is nil (or http.NoBody), it's safe to reuse - // this request and its Body. - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return req, nil - } - // Otherwise we depend on the Request having its GetBody - // func defined. - getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) - } - body, err := getBody() - if err != nil { - return nil, err - } - newReq := *req - newReq.Body = body - return &newReq, nil -} - -func canRetryError(err error) bool { - if err == errClientConnUnusable || err == errClientConnGotGoAway { - return true - } - if se, ok := err.(StreamError); ok { - return se.Code == ErrCodeRefusedStream - } - return false -} - -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.newClientConn(tconn, singleUse) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *cloneTLSConfig(t.TLSClientConfig) - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) - } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) expectContinueTimeout() time.Duration { - if t.t1 == nil { - return 0 - } - return transportExpectContinueTimeout(t.t1) -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, false) -} - -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
- streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) - } - if VerboseLogs { - t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) - } - - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. - cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) - cc.br = bufio.NewReader(c) - cc.fr = NewFramer(cc.bw, cc.br) - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? - cc.henc = hpack.NewEncoder(&cc.hbuf) - - if cs, ok := c.(connectionStater); ok { - state := cs.ConnectionState() - cc.tlsState = &state - } - - initialSettings := []Setting{ - {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxHeaderListSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) - } - - cc.bw.Write(clientPreface) - cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) - cc.bw.Flush() - if cc.werr != nil { - return nil, cc.werr - } - - go cc.readLoop() - return cc, nil -} - -func (cc *ClientConn) setGoAway(f *GoAwayFrame) { - cc.mu.Lock() - defer cc.mu.Unlock() - - old := cc.goAway - cc.goAway = f - - // Merge the previous and current GoAway error frames. - if cc.goAwayDebug == "" { - cc.goAwayDebug = string(f.DebugData()) - } - if old != nil && old.ErrCode != ErrCodeNo { - cc.goAway.ErrCode = old.ErrCode - } - last := f.LastStreamID - for streamID, cs := range cc.streams { - if streamID > last { - select { - case cs.resc <- resAndError{err: errClientConnGotGoAway}: - default: - } - } - } -} - -// CanTakeNewRequest reports whether the connection can take a new request, -// meaning it has not been closed or received or sent a GOAWAY. -func (cc *ClientConn) CanTakeNewRequest() bool { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.canTakeNewRequestLocked() -} - -func (cc *ClientConn) canTakeNewRequestLocked() bool { - if cc.singleUse && cc.nextStreamID > 1 { - return false - } - return cc.goAway == nil && !cc.closed && - int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 -} - -// onIdleTimeout is called from a time.AfterFunc goroutine. It will -// only be called when we're idle, but because we're coming from a new -// goroutine, there could be a new request coming in at the same time, -// so this simply calls the synchronized closeIfIdle to shut down this -// connection. The timer could just call closeIfIdle, but this is more -// clear. -func (cc *ClientConn) onIdleTimeout() { - cc.closeIfIdle() -} - -func (cc *ClientConn) closeIfIdle() { - cc.mu.Lock() - if len(cc.streams) > 0 { - cc.mu.Unlock() - return - } - cc.closed = true - nextID := cc.nextStreamID - // TODO: do clients send GOAWAY too? maybe? 
Just Close: - cc.mu.Unlock() - - if VerboseLogs { - cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) - } - cc.tconn.Close() -} - -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - -// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not -// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. -var errRequestCanceled = errors.New("net/http: request canceled") - -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - -func (cc *ClientConn) responseHeaderTimeout() time.Duration { - if cc.t.t1 != nil { - return cc.t.t1.ResponseHeaderTimeout - } - // No way to do this (yet?) with just an http2.Transport. Probably - // no need. Request.Cancel this is the new way. We only need to support - // this for compatibility with the old http.Transport fields when - // we're doing transparent http2. - return 0 -} - -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - -// actualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. 
-func actualContentLength(req *http.Request) int64 { - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - -func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - if err := checkConnHeaders(req); err != nil { - return nil, err - } - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, err - } - hasTrailers := trailers != "" - - cc.mu.Lock() - if err := cc.awaitOpenSlotForRequest(req); err != nil { - cc.mu.Unlock() - return nil, err - } - - body := req.Body - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - var requestedGzip bool - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, err - } - - cs := cc.newStream() - cs.req = req - cs.trace = requestTrace(req) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 - - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, werr - } - - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - bodyWritten := false - ctx := reqContext(req) - - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. 
- bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.mu.Lock() - afterBodyWrite := cs.startedWrite - cc.mu.Unlock() - cc.forgetStreamID(cs.ID) - if afterBodyWrite { - return nil, afterReqBodyWriteError{re.err} - } - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil - } - - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, errTimeout - case <-ctx.Done(): - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, ctx.Err() - case <-req.Cancel: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.resetErr - case err := <-bodyWriter.resc: - // Prefer the read loop's response, if available. Issue 16102. - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - return nil, err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. -// Must hold cc.mu. -func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { - var waitingForConn chan struct{} - var waitingForConnErr error // guarded by cc.mu - for { - cc.lastActive = time.Now() - if cc.closed || !cc.canTakeNewRequestLocked() { - return errClientConnUnusable - } - if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { - if waitingForConn != nil { - close(waitingForConn) - } - return nil - } - // Unfortunately, we cannot wait on a condition variable and channel at - // the same time, so instead, we spin up a goroutine to check if the - // request is canceled while we wait for a slot to open in the connection. 
- if waitingForConn == nil { - waitingForConn = make(chan struct{}) - go func() { - if err := awaitRequestCancel(req, waitingForConn); err != nil { - cc.mu.Lock() - waitingForConnErr = err - cc.cond.Broadcast() - cc.mu.Unlock() - } - }() - } - cc.pendingRequests++ - cc.cond.Wait() - cc.pendingRequests-- - if waitingForConnErr != nil { - return waitingForConnErr - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. - errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") -) - -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { - cc := cs.cc - sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - - req := cs.req - hasTrailers := req.Trailer != nil - - var sawEOF bool - for !sawEOF { - n, err := body.Read(buf) - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - return err - } - - remain := buf[:n] - for len(remain) > 0 && err == nil { - var allowed int32 - allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: - return err - } - cc.wmu.Lock() - data := remain[:allowed] - remain = remain[allowed:] - sentEnd = sawEOF && len(remain) == 0 && !hasTrailers - err = cc.fr.WriteData(cs.ID, sentEnd, data) - if err == nil { - // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or - // opt-out? Use some heuristic on the body type? Nagel-like - // timers? Based on 'n'? Only last chunk of this for loop, - // unless flow control tokens are low? For now, always. - // If we change this, see comment below. - err = cc.bw.Flush() - } - cc.wmu.Unlock() - } - if err != nil { - return err - } - } - - if sentEnd { - // Already sent END_STREAM (which implies we have no - // trailers) and flushed, because currently all - // WriteData frames above get a flush. So we're done. 
- return nil - } - - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls, err = cc.encodeTrailers(req) - cc.mu.Unlock() - if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeInternal, err) - cc.forgetStreamID(cs.ID) - return err - } - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - // Two ways to send END_STREAM: either with trailers, or - // with an empty DATA frame. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } - if ferr := cc.bw.Flush(); ferr != nil && err == nil { - err = ferr - } - return err -} - -// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow -// control tokens from the server. -// It returns either the non-zero number of tokens taken or an error -// if the stream is dead. -func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if cc.closed { - return 0, errClientConnClosed - } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody - } - if err := cs.checkResetOrDone(); err != nil { - return 0, err - } - if a := cs.flow.available(); a > 0 { - take := a - if int(take) > maxBytes { - - take = int32(maxBytes) // can't truncate int; take is int32 - } - if take > int32(cc.maxFrameSize) { - take = int32(cc.maxFrameSize) - } - cs.flow.take(take) - return take, nil - } - cc.cond.Wait() - } -} - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// requires cc.mu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httplex.PunycodeHostPort(host) - if err != nil { - return nil, err - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - f(":method", req.Method) - if req.Method != "CONNECT" { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. 
- continue - } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || - strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || - strings.EqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if strings.EqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - cc.writeHeader(strings.ToLower(name), value) - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { - cc.hbuf.Reset() - - hlSize := uint64(0) - for k, vv := range req.Trailer { - for _, v := range vv { - hf := hpack.HeaderField{Name: k, Value: v} - hlSize += uint64(hf.Size()) - } - } - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filtered at the - // start of RoundTrip - lowKey := strings.ToLower(k) - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - return cc.hbuf.Bytes(), nil -} - -func (cc *ClientConn) writeHeader(name, value string) { - if VerboseLogs { - log.Printf("http2: Transport encoding header %q = %q", name, value) - } - cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -type resAndError struct { - res *http.Response - err error -} - -// requires cc.mu be held. 
-func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } - cs.flow.add(int32(cc.initialWindowSize)) - cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) - cc.nextStreamID += 2 - cc.streams[cs.ID] = cs - return cs -} - -func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { - cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - if len(cc.streams) == 0 && cc.idleTimer != nil { - cc.idleTimer.Reset(cc.idleTimeout) - } - close(cs.done) - // Wake up checkResetOrDone via clientStream.awaitFlowControl and - // wake up RoundTrip if there is a pending request. - cc.cond.Broadcast() - } - return cs -} - -// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. -type clientConnReadLoop struct { - cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID - closeWhenIdle bool -} - -// readLoop runs in its own goroutine and reads and dispatches frames. -func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - - defer rl.cleanup() - cc.readerErr = rl.run() - if ce, ok := cc.readerErr.(ConnectionError); ok { - cc.wmu.Lock() - cc.fr.WriteGoAway(0, ErrCode(ce), nil) - cc.wmu.Unlock() - } -} - -// GoAwayError is returned by the Transport when the server closes the -// TCP connection after sending a GOAWAY frame. -type GoAwayError struct { - LastStreamID uint32 - ErrCode ErrCode - DebugData string -} - -func (e GoAwayError) Error() string { - return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", - e.LastStreamID, e.ErrCode, e.DebugData) -} - -func isEOFOrNetReadError(err error) bool { - if err == io.EOF { - return true - } - ne, ok := err.(*net.OpError) - return ok && ne.Op == "read" -} - -func (rl *clientConnReadLoop) cleanup() { - cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) - defer close(cc.readerDone) - - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - // Close any response bodies if the server closes prematurely. - // TODO: also do this if we've written the headers but not - // gotten a response yet. 
- err := cc.readerErr - cc.mu.Lock() - if cc.goAway != nil && isEOFOrNetReadError(err) { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else if err == io.EOF { - err = io.ErrUnexpectedEOF - } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } - for _, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply - gotSettings := false - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, false); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - cs.cc.forgetStreamID(cs.ID) - if se.Cause == nil { - se.Cause = cc.fr.errDetail - } - rl.endStreamError(cs, se) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - if !gotSettings { - if _, ok := f.(*SettingsFrame); !ok { - cc.logf("protocol error: received %T before a SETTINGS frame", f) - return ConnectionError(ErrCodeProtocol) - } - gotSettings = true - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - if VerboseLogs { - cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) - } - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if !cs.firstByte { - if cs.trace != nil { - // TODO(bradfitz): move first response byte earlier, - // when we first read the 9 byte header, not waiting - // until all the HEADERS+CONTINUATION frames have been - // merged. This works for now. - traceFirstResponseByte(cs.trace) - } - cs.firstByte = true - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. 
See handleResponse docs. - return nil - } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. -func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } - - if streamEnded || isHead { - res.Body = noBody - return res, nil - } - - cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check. 
- return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. -type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. - v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - cc := cs.cc - - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF - unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { - cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - cs.didReset = true - } - // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - cc.fr.WriteWindowUpdate(0, uint32(unread)) - } - cc.bw.Flush() - cc.wmu.Unlock() - cc.mu.Unlock() - } - - cs.bufPipe.BreakWithError(errClosedResponseBody) - cc.forgetStreamID(cs.ID) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - data := f.Data() - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? 
Tough without accumulating too much state. - - // But at least return their flow control: - if f.Length > 0 { - cc.mu.Lock() - cc.inflow.add(int32(f.Length)) - cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() - } - return nil - } - if !cs.firstByte { - cc.logf("protocol error: received DATA before a HEADERS frame") - rl.endStreamError(cs, StreamError{ - StreamID: f.StreamID, - Code: ErrCodeProtocol, - }) - return nil - } - if f.Length > 0 { - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - // Return any padded flow control now, since we won't - // refund it later on body reads. - var refund int - if pad := int(f.Length) - len(data); pad > 0 { - refund += pad - } - // Return len(data) now if the stream is already closed, - // since data will never be read. - didReset := cs.didReset - if didReset { - refund += len(data) - } - if refund > 0 { - cc.inflow.add(int32(refund)) - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(refund)) - if !didReset { - cs.inflow.add(int32(refund)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) - } - cc.bw.Flush() - cc.wmu.Unlock() - } - cc.mu.Unlock() - - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - - select { - case cs.resc <- resAndError{err: err}: - default: - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - - if f.IsAck() { - if cc.wantSettingsAck { - cc.wantSettingsAck = false - return nil - } - return ConnectionError(ErrCodeProtocol) - } - - err := f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingMaxHeaderListSize: - cc.peerMaxHeaderListSize = uint64(s.Val) - case SettingInitialWindowSize: - // Values above the maximum flow-control - // window size of 2^31-1 MUST be treated as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. 
- if s.Val > math.MaxInt32 { - return ConnectionError(ErrCodeFlowControl) - } - - // Adjust flow control of currently-open - // frames by the difference of the old initial - // window size and this one. - delta := int32(s.Val) - int32(cc.initialWindowSize) - for _, cs := range cc.streams { - cs.flow.add(delta) - } - cc.cond.Broadcast() - - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. - cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - if err != nil { - return err - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - delete(rl.activeRes, cs.ID) - return nil -} - -// Ping sends a PING frame to the server and waits for the ack. -// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) ping(ctx contextContext) error { - c := make(chan struct{}) - // Generate a random payload - var p [8]byte - for { - if _, err := rand.Read(p[:]); err != nil { - return err - } - cc.mu.Lock() - // check for dup before insert - if _, found := cc.pings[p]; !found { - cc.pings[p] = c - cc.mu.Unlock() - break - } - cc.mu.Unlock() - } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() - select { - case <-c: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-cc.readerDone: - // connection closed - return cc.readerErr - } -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - // If ack, notify listener if any - if c, ok := cc.pings[f.Data]; ok { - close(c) - delete(cc.pings, f.Data) - } - return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." 
- return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: map err to more interesting error codes, once the - // HTTP community comes up with some. But currently for - // RST_STREAM there's no equivalent to GOAWAY frame's debug - // data, and the error codes are all pretty vague ("cancel"). - cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httplex.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. 
It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. - const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - s.timer.Stop() - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. - return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - -// isConnectionCloseRequest reports whether req should use its own -// connection for a single request and then close the connection. -func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") -} diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go deleted file mode 100644 index 0126ff48..00000000 --- a/vendor/golang.org/x/net/http2/transport_test.go +++ /dev/null @@ -1,3686 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "golang.org/x/net/http2/hpack" -) - -var ( - extNet = flag.Bool("extnet", false, "do external network tests") - transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport") - insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove? 
-) - -var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true} - -type testContext struct{} - -func (testContext) Done() <-chan struct{} { return make(chan struct{}) } -func (testContext) Err() error { panic("should not be called") } -func (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } -func (testContext) Value(key interface{}) interface{} { return nil } - -func TestTransportExternal(t *testing.T) { - if !*extNet { - t.Skip("skipping external network test") - } - req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil) - rt := &Transport{TLSClientConfig: tlsConfigInsecure} - res, err := rt.RoundTrip(req) - if err != nil { - t.Fatalf("%v", err) - } - res.Write(os.Stdout) -} - -type fakeTLSConn struct { - net.Conn -} - -func (c *fakeTLSConn) ConnectionState() tls.ConnectionState { - return tls.ConnectionState{ - Version: tls.VersionTLS12, - CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - } -} - -func startH2cServer(t *testing.T) net.Listener { - h2Server := &Server{} - l := newLocalListener(t) - go func() { - conn, err := l.Accept() - if err != nil { - t.Error(err) - return - } - h2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello, %v, http: %v", r.URL.Path, r.TLS == nil) - })}) - }() - return l -} - -func TestTransportH2c(t *testing.T) { - l := startH2cServer(t) - defer l.Close() - req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil) - if err != nil { - t.Fatal(err) - } - tr := &Transport{ - AllowHTTP: true, - DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { - return net.Dial(network, addr) - }, - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - if res.ProtoMajor != 2 { - t.Fatal("proto not h2c") - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if got, want := string(body), "Hello, /foobar, http: true"; got != want { - t.Fatalf("response got %v, want %v", got, want) - } -} - -func TestTransport(t *testing.T) { - const body = "sup" - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, body) - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal(err) - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - - t.Logf("Got res: %+v", res) - if g, w := res.StatusCode, 200; g != w { - t.Errorf("StatusCode = %v; want %v", g, w) - } - if g, w := res.Status, "200 OK"; g != w { - t.Errorf("Status = %q; want %q", g, w) - } - wantHeader := http.Header{ - "Content-Length": []string{"3"}, - "Content-Type": []string{"text/plain; charset=utf-8"}, - "Date": []string{"XXX"}, // see cleanDate - } - cleanDate(res) - if !reflect.DeepEqual(res.Header, wantHeader) { - t.Errorf("res Header = %v; want %v", res.Header, wantHeader) - } - if res.Request != req { - t.Errorf("Response.Request = %p; want %p", res.Request, req) - } - if res.TLS == nil { - t.Error("Response.TLS = nil; want non-nil") - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("Body read: %v", err) - } else if string(slurp) != body { - t.Errorf("Body = %q; want %q", slurp, body) - } -} - -func onSameConn(t *testing.T, modReq func(*http.Request)) bool { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) 
{ - io.WriteString(w, r.RemoteAddr) - }, optOnlyServer, func(c net.Conn, st http.ConnState) { - t.Logf("conn %v is now state %v", c.RemoteAddr(), st) - }) - defer st.Close() - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - get := func() string { - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal(err) - } - modReq(req) - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("Body read: %v", err) - } - addr := strings.TrimSpace(string(slurp)) - if addr == "" { - t.Fatalf("didn't get an addr in response") - } - return addr - } - first := get() - second := get() - return first == second -} - -func TestTransportReusesConns(t *testing.T) { - if !onSameConn(t, func(*http.Request) {}) { - t.Errorf("first and second responses were on different connections") - } -} - -func TestTransportReusesConn_RequestClose(t *testing.T) { - if onSameConn(t, func(r *http.Request) { r.Close = true }) { - t.Errorf("first and second responses were not on different connections") - } -} - -func TestTransportReusesConn_ConnClose(t *testing.T) { - if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) { - t.Errorf("first and second responses were not on different connections") - } -} - -// Tests that the Transport only keeps one pending dial open per destination address. -// https://golang.org/issue/13397 -func TestTransportGroupsPendingDials(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, r.RemoteAddr) - }, optOnlyServer) - defer st.Close() - tr := &Transport{ - TLSClientConfig: tlsConfigInsecure, - } - defer tr.CloseIdleConnections() - var ( - mu sync.Mutex - dials = map[string]int{} - ) - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Error(err) - return - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Error(err) - return - } - defer res.Body.Close() - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("Body read: %v", err) - } - addr := strings.TrimSpace(string(slurp)) - if addr == "" { - t.Errorf("didn't get an addr in response") - } - mu.Lock() - dials[addr]++ - mu.Unlock() - }() - } - wg.Wait() - if len(dials) != 1 { - t.Errorf("saw %d dials; want 1: %v", len(dials), dials) - } - tr.CloseIdleConnections() - if err := retry(50, 10*time.Millisecond, func() error { - cp, ok := tr.connPool().(*clientConnPool) - if !ok { - return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool()) - } - cp.mu.Lock() - defer cp.mu.Unlock() - if len(cp.dialing) != 0 { - return fmt.Errorf("dialing map = %v; want empty", cp.dialing) - } - if len(cp.conns) != 0 { - return fmt.Errorf("conns = %v; want empty", cp.conns) - } - if len(cp.keys) != 0 { - return fmt.Errorf("keys = %v; want empty", cp.keys) - } - return nil - }); err != nil { - t.Errorf("State of pool after CloseIdleConnections: %v", err) - } -} - -func retry(tries int, delay time.Duration, fn func() error) error { - var err error - for i := 0; i < tries; i++ { - err = fn() - if err == nil { - return nil - } - time.Sleep(delay) - } - return err -} - -func TestTransportAbortClosesPipes(t *testing.T) { - shutdown := make(chan struct{}) - st := newServerTester(t, - func(w http.ResponseWriter, r *http.Request) { - w.(http.Flusher).Flush() - <-shutdown - }, - 
optOnlyServer, - ) - defer st.Close() - defer close(shutdown) // we must shutdown before st.Close() to avoid hanging - - done := make(chan struct{}) - requestMade := make(chan struct{}) - go func() { - defer close(done) - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal(err) - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - close(requestMade) - _, err = ioutil.ReadAll(res.Body) - if err == nil { - t.Error("expected error from res.Body.Read") - } - }() - - <-requestMade - // Now force the serve loop to end, via closing the connection. - st.closeConn() - // deadlock? that's a bug. - select { - case <-done: - case <-time.After(3 * time.Second): - t.Fatal("timeout") - } -} - -// TODO: merge this with TestTransportBody to make TestTransportRequest? This -// could be a table-driven test with extra goodies. -func TestTransportPath(t *testing.T) { - gotc := make(chan *url.URL, 1) - st := newServerTester(t, - func(w http.ResponseWriter, r *http.Request) { - gotc <- r.URL - }, - optOnlyServer, - ) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - const ( - path = "/testpath" - query = "q=1" - ) - surl := st.ts.URL + path + "?" + query - req, err := http.NewRequest("POST", surl, nil) - if err != nil { - t.Fatal(err) - } - c := &http.Client{Transport: tr} - res, err := c.Do(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - got := <-gotc - if got.Path != path { - t.Errorf("Read Path = %q; want %q", got.Path, path) - } - if got.RawQuery != query { - t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query) - } -} - -func randString(n int) string { - rnd := rand.New(rand.NewSource(int64(n))) - b := make([]byte, n) - for i := range b { - b[i] = byte(rnd.Intn(256)) - } - return string(b) -} - -type panicReader struct{} - -func (panicReader) Read([]byte) (int, error) { panic("unexpected Read") } -func (panicReader) Close() error { panic("unexpected Close") } - -func TestActualContentLength(t *testing.T) { - tests := []struct { - req *http.Request - want int64 - }{ - // Verify we don't read from Body: - 0: { - req: &http.Request{Body: panicReader{}}, - want: -1, - }, - // nil Body means 0, regardless of ContentLength: - 1: { - req: &http.Request{Body: nil, ContentLength: 5}, - want: 0, - }, - // ContentLength is used if set. - 2: { - req: &http.Request{Body: panicReader{}, ContentLength: 5}, - want: 5, - }, - // http.NoBody means 0, not -1. 
- 3: { - req: &http.Request{Body: go18httpNoBody()}, - want: 0, - }, - } - for i, tt := range tests { - got := actualContentLength(tt.req) - if got != tt.want { - t.Errorf("test[%d]: got %d; want %d", i, got, tt.want) - } - } -} - -func TestTransportBody(t *testing.T) { - bodyTests := []struct { - body string - noContentLen bool - }{ - {body: "some message"}, - {body: "some message", noContentLen: true}, - {body: strings.Repeat("a", 1<<20), noContentLen: true}, - {body: strings.Repeat("a", 1<<20)}, - {body: randString(16<<10 - 1)}, - {body: randString(16 << 10)}, - {body: randString(16<<10 + 1)}, - {body: randString(512<<10 - 1)}, - {body: randString(512 << 10)}, - {body: randString(512<<10 + 1)}, - {body: randString(1<<20 - 1)}, - {body: randString(1 << 20)}, - {body: randString(1<<20 + 2)}, - } - - type reqInfo struct { - req *http.Request - slurp []byte - err error - } - gotc := make(chan reqInfo, 1) - st := newServerTester(t, - func(w http.ResponseWriter, r *http.Request) { - slurp, err := ioutil.ReadAll(r.Body) - if err != nil { - gotc <- reqInfo{err: err} - } else { - gotc <- reqInfo{req: r, slurp: slurp} - } - }, - optOnlyServer, - ) - defer st.Close() - - for i, tt := range bodyTests { - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - var body io.Reader = strings.NewReader(tt.body) - if tt.noContentLen { - body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods - } - req, err := http.NewRequest("POST", st.ts.URL, body) - if err != nil { - t.Fatalf("#%d: %v", i, err) - } - c := &http.Client{Transport: tr} - res, err := c.Do(req) - if err != nil { - t.Fatalf("#%d: %v", i, err) - } - defer res.Body.Close() - ri := <-gotc - if ri.err != nil { - t.Errorf("#%d: read error: %v", i, ri.err) - continue - } - if got := string(ri.slurp); got != tt.body { - t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body)) - } - wantLen := int64(len(tt.body)) - if tt.noContentLen && tt.body != "" { - wantLen = -1 - } - if ri.req.ContentLength != wantLen { - t.Errorf("#%d. 
handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen) - } - } -} - -func shortString(v string) string { - const maxLen = 100 - if len(v) <= maxLen { - return v - } - return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:]) -} - -func TestTransportDialTLS(t *testing.T) { - var mu sync.Mutex // guards following - var gotReq, didDial bool - - ts := newServerTester(t, - func(w http.ResponseWriter, r *http.Request) { - mu.Lock() - gotReq = true - mu.Unlock() - }, - optOnlyServer, - ) - defer ts.Close() - tr := &Transport{ - DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { - mu.Lock() - didDial = true - mu.Unlock() - cfg.InsecureSkipVerify = true - c, err := tls.Dial(netw, addr, cfg) - if err != nil { - return nil, err - } - return c, c.Handshake() - }, - } - defer tr.CloseIdleConnections() - client := &http.Client{Transport: tr} - res, err := client.Get(ts.ts.URL) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - mu.Lock() - if !gotReq { - t.Error("didn't get request") - } - if !didDial { - t.Error("didn't use dial hook") - } -} - -func TestConfigureTransport(t *testing.T) { - t1 := &http.Transport{} - err := ConfigureTransport(t1) - if err == errTransportVersion { - t.Skip(err) - } - if err != nil { - t.Fatal(err) - } - if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) { - // Laziness, to avoid buildtags. - t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got) - } - wantNextProtos := []string{"h2", "http/1.1"} - if t1.TLSClientConfig == nil { - t.Errorf("nil t1.TLSClientConfig") - } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) { - t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos) - } - if err := ConfigureTransport(t1); err == nil { - t.Error("unexpected success on second call to ConfigureTransport") - } - - // And does it work? 
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, r.Proto) - }, optOnlyServer) - defer st.Close() - - t1.TLSClientConfig.InsecureSkipVerify = true - c := &http.Client{Transport: t1} - res, err := c.Get(st.ts.URL) - if err != nil { - t.Fatal(err) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if got, want := string(slurp), "HTTP/2.0"; got != want { - t.Errorf("body = %q; want %q", got, want) - } -} - -type capitalizeReader struct { - r io.Reader -} - -func (cr capitalizeReader) Read(p []byte) (n int, err error) { - n, err = cr.r.Read(p) - for i, b := range p[:n] { - if b >= 'a' && b <= 'z' { - p[i] = b - ('a' - 'A') - } - } - return -} - -type flushWriter struct { - w io.Writer -} - -func (fw flushWriter) Write(p []byte) (n int, err error) { - n, err = fw.w.Write(p) - if f, ok := fw.w.(http.Flusher); ok { - f.Flush() - } - return -} - -type clientTester struct { - t *testing.T - tr *Transport - sc, cc net.Conn // server and client conn - fr *Framer // server's framer - client func() error - server func() error -} - -func newClientTester(t *testing.T) *clientTester { - var dialOnce struct { - sync.Mutex - dialed bool - } - ct := &clientTester{ - t: t, - } - ct.tr = &Transport{ - TLSClientConfig: tlsConfigInsecure, - DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { - dialOnce.Lock() - defer dialOnce.Unlock() - if dialOnce.dialed { - return nil, errors.New("only one dial allowed in test mode") - } - dialOnce.dialed = true - return ct.cc, nil - }, - } - - ln := newLocalListener(t) - cc, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - t.Fatal(err) - - } - sc, err := ln.Accept() - if err != nil { - t.Fatal(err) - } - ln.Close() - ct.cc = cc - ct.sc = sc - ct.fr = NewFramer(sc, sc) - return ct -} - -func newLocalListener(t *testing.T) net.Listener { - ln, err := net.Listen("tcp4", "127.0.0.1:0") - if err == nil { - return ln - } - ln, err = net.Listen("tcp6", "[::1]:0") - if err != nil { - t.Fatal(err) - } - return ln -} - -func (ct *clientTester) greet(settings ...Setting) { - buf := make([]byte, len(ClientPreface)) - _, err := io.ReadFull(ct.sc, buf) - if err != nil { - ct.t.Fatalf("reading client preface: %v", err) - } - f, err := ct.fr.ReadFrame() - if err != nil { - ct.t.Fatalf("Reading client settings frame: %v", err) - } - if sf, ok := f.(*SettingsFrame); !ok { - ct.t.Fatalf("Wanted client settings frame; got %v", f) - _ = sf // stash it away? 
- } - if err := ct.fr.WriteSettings(settings...); err != nil { - ct.t.Fatal(err) - } - if err := ct.fr.WriteSettingsAck(); err != nil { - ct.t.Fatal(err) - } -} - -func (ct *clientTester) readNonSettingsFrame() (Frame, error) { - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return nil, err - } - if _, ok := f.(*SettingsFrame); ok { - continue - } - return f, nil - } -} - -func (ct *clientTester) cleanup() { - ct.tr.CloseIdleConnections() -} - -func (ct *clientTester) run() { - errc := make(chan error, 2) - ct.start("client", errc, ct.client) - ct.start("server", errc, ct.server) - defer ct.cleanup() - for i := 0; i < 2; i++ { - if err := <-errc; err != nil { - ct.t.Error(err) - return - } - } -} - -func (ct *clientTester) start(which string, errc chan<- error, fn func() error) { - go func() { - finished := false - var err error - defer func() { - if !finished { - err = fmt.Errorf("%s goroutine didn't finish.", which) - } else if err != nil { - err = fmt.Errorf("%s: %v", which, err) - } - errc <- err - }() - err = fn() - finished = true - }() -} - -func (ct *clientTester) readFrame() (Frame, error) { - return readFrameTimeout(ct.fr, 2*time.Second) -} - -func (ct *clientTester) firstHeaders() (*HeadersFrame, error) { - for { - f, err := ct.readFrame() - if err != nil { - return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - continue - } - hf, ok := f.(*HeadersFrame) - if !ok { - return nil, fmt.Errorf("Got %T; want HeadersFrame", f) - } - return hf, nil - } -} - -type countingReader struct { - n *int64 -} - -func (r countingReader) Read(p []byte) (n int, err error) { - for i := range p { - p[i] = byte(i) - } - atomic.AddInt64(r.n, int64(len(p))) - return len(p), err -} - -func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } -func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } - -func testTransportReqBodyAfterResponse(t *testing.T, status int) { - const bodySize = 10 << 20 - clientDone := make(chan struct{}) - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - defer close(clientDone) - - var n int64 // atomic - req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize)) - if err != nil { - return err - } - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != status { - return fmt.Errorf("status code = %v; want %v", res.StatusCode, status) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("Slurp: %v", err) - } - if len(slurp) > 0 { - return fmt.Errorf("unexpected body: %q", slurp) - } - if status == 200 { - if got := atomic.LoadInt64(&n); got != bodySize { - return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize) - } - } else { - if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize { - return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize) - } - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - var dataRecv int64 - var closed bool - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it - // will have reported any - // errors on its side. 
- return nil - default: - return err - } - } - //println(fmt.Sprintf("server got frame: %v", f)) - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - if f.StreamEnded() { - return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) - } - case *DataFrame: - dataLen := len(f.Data()) - if dataLen > 0 { - if dataRecv == 0 { - enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - } - if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { - return err - } - if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { - return err - } - } - dataRecv += int64(dataLen) - - if !closed && ((status != 200 && dataRecv > 0) || - (status == 200 && dataRecv == bodySize)) { - closed = true - if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { - return err - } - } - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } - } - ct.run() -} - -// See golang.org/issue/13444 -func TestTransportFullDuplex(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) // redundant but for clarity - w.(http.Flusher).Flush() - io.Copy(flushWriter{w}, capitalizeReader{r.Body}) - fmt.Fprintf(w, "bye.\n") - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - - pr, pw := io.Pipe() - req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) - if err != nil { - t.Fatal(err) - } - req.ContentLength = -1 - res, err := c.Do(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200) - } - bs := bufio.NewScanner(res.Body) - want := func(v string) { - if !bs.Scan() { - t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err()) - } - } - write := func(v string) { - _, err := io.WriteString(pw, v) - if err != nil { - t.Fatalf("pipe write: %v", err) - } - } - write("foo\n") - want("FOO") - write("bar\n") - want("BAR") - pw.Close() - want("bye.") - if err := bs.Err(); err != nil { - t.Fatal(err) - } -} - -func TestTransportConnectRequest(t *testing.T) { - gotc := make(chan *http.Request, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - gotc <- r - }, optOnlyServer) - defer st.Close() - - u, err := url.Parse(st.ts.URL) - if err != nil { - t.Fatal(err) - } - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - - tests := []struct { - req *http.Request - want string - }{ - { - req: &http.Request{ - Method: "CONNECT", - Header: http.Header{}, - URL: u, - }, - want: u.Host, - }, - { - req: &http.Request{ - Method: "CONNECT", - Header: http.Header{}, - URL: u, - Host: "example.com:123", - }, - want: "example.com:123", - }, - } - - for i, tt := range tests { - res, err := c.Do(tt.req) - if err != nil { - t.Errorf("%d. 
RoundTrip = %v", i, err) - continue - } - res.Body.Close() - req := <-gotc - if req.Method != "CONNECT" { - t.Errorf("method = %q; want CONNECT", req.Method) - } - if req.Host != tt.want { - t.Errorf("Host = %q; want %q", req.Host, tt.want) - } - if req.URL.Host != tt.want { - t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want) - } - } -} - -type headerType int - -const ( - noHeader headerType = iota // omitted - oneHeader - splitHeader // broken into continuation on purpose -) - -const ( - f0 = noHeader - f1 = oneHeader - f2 = splitHeader - d0 = false - d1 = true -) - -// Test all 36 combinations of response frame orders: -// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers):func TestTransportResponsePattern_00f0(t *testing.T) { testTransportResponsePattern(h0, h1, false, h0) } -// Generated by http://play.golang.org/p/SScqYKJYXd -func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) } -func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) } -func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) } -func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) } -func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) } -func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) } -func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) } -func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) } -func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) } -func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) } -func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) } -func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) } -func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) } -func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) } -func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) } -func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) } -func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) } -func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) } -func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) } -func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) } -func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) } -func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) } -func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) } -func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) } -func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) } -func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) } -func TestTransportResPattern_c2h1d0t2(t *testing.T) { 
testTransportResPattern(t, f2, f1, d0, f2) } -func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) } -func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) } -func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) } -func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) } -func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) } -func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) } -func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) } -func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) } -func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) } - -func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) { - const reqBody = "some request body" - const resBody = "some response body" - - if resHeader == noHeader { - // TODO: test 100-continue followed by immediate - // server stream reset, without headers in the middle? - panic("invalid combination") - } - - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) - if expect100Continue != noHeader { - req.Header.Set("Expect", "100-continue") - } - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("status code = %v; want 200", res.StatusCode) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("Slurp: %v", err) - } - wantBody := resBody - if !withData { - wantBody = "" - } - if string(slurp) != wantBody { - return fmt.Errorf("body = %q; want %q", slurp, wantBody) - } - if trailers == noHeader { - if len(res.Trailer) > 0 { - t.Errorf("Trailer = %v; want none", res.Trailer) - } - } else { - want := http.Header{"Some-Trailer": {"some-value"}} - if !reflect.DeepEqual(res.Trailer, want) { - t.Errorf("Trailer = %v; want %v", res.Trailer, want) - } - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - endStream := false - send := func(mode headerType) { - hbf := buf.Bytes() - switch mode { - case oneHeader: - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.Header().StreamID, - EndHeaders: true, - EndStream: endStream, - BlockFragment: hbf, - }) - case splitHeader: - if len(hbf) < 2 { - panic("too small") - } - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.Header().StreamID, - EndHeaders: false, - EndStream: endStream, - BlockFragment: hbf[:1], - }) - ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) - default: - panic("bogus mode") - } - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *DataFrame: - if !f.StreamEnded() { - // No need to send flow control tokens. The test request body is tiny. 
- continue - } - // Response headers (1+ frames; 1 or 2 in this test, but never 0) - { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"}) - enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"}) - if trailers != noHeader { - enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"}) - } - endStream = withData == false && trailers == noHeader - send(resHeader) - } - if withData { - endStream = trailers == noHeader - ct.fr.WriteData(f.StreamID, endStream, []byte(resBody)) - } - if trailers != noHeader { - endStream = true - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) - send(trailers) - } - if endStream { - return nil - } - case *HeadersFrame: - if expect100Continue != noHeader { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) - send(expect100Continue) - } - } - } - } - ct.run() -} - -func TestTransportReceiveUndeclaredTrailer(t *testing.T) { - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("status code = %v; want 200", res.StatusCode) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil) - } - if len(slurp) > 0 { - return fmt.Errorf("body = %q; want nothing", slurp) - } - if _, ok := res.Trailer["Some-Trailer"]; !ok { - return fmt.Errorf("expected Some-Trailer") - } - return nil - } - ct.server = func() error { - ct.greet() - - var n int - var hf *HeadersFrame - for hf == nil && n < 10 { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - hf, _ = f.(*HeadersFrame) - n++ - } - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - - // send headers without Trailer header - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - - // send trailers - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - return nil - } - ct.run() -} - -func TestTransportInvalidTrailer_Pseudo1(t *testing.T) { - testTransportInvalidTrailer_Pseudo(t, oneHeader) -} -func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { - testTransportInvalidTrailer_Pseudo(t, splitHeader) -} -func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) - enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) - }) -} - -func TestTransportInvalidTrailer_Capital1(t *testing.T) { - testTransportInvalidTrailer_Capital(t, oneHeader) -} -func TestTransportInvalidTrailer_Capital2(t *testing.T) { - testTransportInvalidTrailer_Capital(t, splitHeader) -} -func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) - 
enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) - }) -} -func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { - testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) - }) -} -func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { - testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) - }) -} - -func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) { - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("status code = %v; want 200", res.StatusCode) - } - slurp, err := ioutil.ReadAll(res.Body) - se, ok := err.(StreamError) - if !ok || se.Cause != wantErr { - return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr) - } - if len(slurp) > 0 { - return fmt.Errorf("body = %q; want nothing", slurp) - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - var endStream bool - send := func(mode headerType) { - hbf := buf.Bytes() - switch mode { - case oneHeader: - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: endStream, - BlockFragment: hbf, - }) - case splitHeader: - if len(hbf) < 2 { - panic("too small") - } - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: false, - EndStream: endStream, - BlockFragment: hbf[:1], - }) - ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) - default: - panic("bogus mode") - } - } - // Response headers (1+ frames; 1 or 2 in this test, but never 0) - { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"}) - endStream = false - send(oneHeader) - } - // Trailers: - { - endStream = true - buf.Reset() - writeTrailer(enc) - send(trailers) - } - return nil - } - } - } - ct.run() -} - -// headerListSize returns the HTTP2 header list size of h. -// http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE -// http://httpwg.org/specs/rfc7540.html#MaxHeaderBlock -func headerListSize(h http.Header) (size uint32) { - for k, vv := range h { - for _, v := range vv { - hf := hpack.HeaderField{Name: k, Value: v} - size += hf.Size() - } - } - return size -} - -// padHeaders adds data to an http.Header until headerListSize(h) == -// limit. Due to the way header list sizes are calculated, padHeaders -// cannot add fewer than len("Pad-Headers") + 32 bytes to h, and will -// call t.Fatal if asked to do so. PadHeaders first reserves enough -// space for an empty "Pad-Headers" key, then adds as many copies of -// filler as possible. Any remaining bytes necessary to push the -// header list size up to limit are added to h["Pad-Headers"]. -func padHeaders(t *testing.T, h http.Header, limit uint64, filler string) { - if limit > 0xffffffff { - t.Fatalf("padHeaders: refusing to pad to more than 2^32-1 bytes. 
limit = %v", limit) - } - hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} - minPadding := uint64(hf.Size()) - size := uint64(headerListSize(h)) - - minlimit := size + minPadding - if limit < minlimit { - t.Fatalf("padHeaders: limit %v < %v", limit, minlimit) - } - - // Use a fixed-width format for name so that fieldSize - // remains constant. - nameFmt := "Pad-Headers-%06d" - hf = hpack.HeaderField{Name: fmt.Sprintf(nameFmt, 1), Value: filler} - fieldSize := uint64(hf.Size()) - - // Add as many complete filler values as possible, leaving - // room for at least one empty "Pad-Headers" key. - limit = limit - minPadding - for i := 0; size+fieldSize < limit; i++ { - name := fmt.Sprintf(nameFmt, i) - h.Add(name, filler) - size += fieldSize - } - - // Add enough bytes to reach limit. - remain := limit - size - lastValue := strings.Repeat("*", int(remain)) - h.Add("Pad-Headers", lastValue) -} - -func TestPadHeaders(t *testing.T) { - check := func(h http.Header, limit uint32, fillerLen int) { - if h == nil { - h = make(http.Header) - } - filler := strings.Repeat("f", fillerLen) - padHeaders(t, h, uint64(limit), filler) - gotSize := headerListSize(h) - if gotSize != limit { - t.Errorf("Got size = %v; want %v", gotSize, limit) - } - } - // Try all possible combinations for small fillerLen and limit. - hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} - minLimit := hf.Size() - for limit := minLimit; limit <= 128; limit++ { - for fillerLen := 0; uint32(fillerLen) <= limit; fillerLen++ { - check(nil, limit, fillerLen) - } - } - - // Try a few tests with larger limits, plus cumulative - // tests. Since these tests are cumulative, tests[i+1].limit - // must be >= tests[i].limit + minLimit. See the comment on - // padHeaders for more info on why the limit arg has this - // restriction. - tests := []struct { - fillerLen int - limit uint32 - }{ - { - fillerLen: 64, - limit: 1024, - }, - { - fillerLen: 1024, - limit: 1286, - }, - { - fillerLen: 256, - limit: 2048, - }, - { - fillerLen: 1024, - limit: 10 * 1024, - }, - { - fillerLen: 1023, - limit: 11 * 1024, - }, - } - h := make(http.Header) - for _, tc := range tests { - check(nil, tc.limit, tc.fillerLen) - check(h, tc.limit, tc.fillerLen) - } -} - -func TestTransportChecksRequestHeaderListSize(t *testing.T) { - st := newServerTester(t, - func(w http.ResponseWriter, r *http.Request) { - // Consume body & force client to send - // trailers before writing response. - // ioutil.ReadAll returns non-nil err for - // requests that attempt to send greater than - // maxHeaderListSize bytes of trailers, since - // those requests generate a stream reset. 
- ioutil.ReadAll(r.Body) - r.Body.Close() - }, - func(ts *httptest.Server) { - ts.Config.MaxHeaderBytes = 16 << 10 - }, - optOnlyServer, - optQuiet, - ) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - checkRoundTrip := func(req *http.Request, wantErr error, desc string) { - res, err := tr.RoundTrip(req) - if err != wantErr { - if res != nil { - res.Body.Close() - } - t.Errorf("%v: RoundTrip err = %v; want %v", desc, err, wantErr) - return - } - if err == nil { - if res == nil { - t.Errorf("%v: response nil; want non-nil.", desc) - return - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - t.Errorf("%v: response status = %v; want %v", desc, res.StatusCode, http.StatusOK) - } - return - } - if res != nil { - t.Errorf("%v: RoundTrip err = %v but response non-nil", desc, err) - } - } - headerListSizeForRequest := func(req *http.Request) (size uint64) { - contentLen := actualContentLength(req) - trailers, err := commaSeparatedTrailers(req) - if err != nil { - t.Fatalf("headerListSizeForRequest: %v", err) - } - cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} - cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.mu.Lock() - hdrs, err := cc.encodeHeaders(req, true, trailers, contentLen) - cc.mu.Unlock() - if err != nil { - t.Fatalf("headerListSizeForRequest: %v", err) - } - hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(hf hpack.HeaderField) { - size += uint64(hf.Size()) - }) - if len(hdrs) > 0 { - if _, err := hpackDec.Write(hdrs); err != nil { - t.Fatalf("headerListSizeForRequest: %v", err) - } - } - return size - } - // Create a new Request for each test, rather than reusing the - // same Request, to avoid a race when modifying req.Headers. - // See https://github.com/golang/go/issues/21316 - newRequest := func() *http.Request { - // Body must be non-nil to enable writing trailers. - body := strings.NewReader("hello") - req, err := http.NewRequest("POST", st.ts.URL, body) - if err != nil { - t.Fatalf("newRequest: NewRequest: %v", err) - } - return req - } - - // Make an arbitrary request to ensure we get the server's - // settings frame and initialize peerMaxHeaderListSize. - req := newRequest() - checkRoundTrip(req, nil, "Initial request") - - // Get the ClientConn associated with the request and validate - // peerMaxHeaderListSize. - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - cc, err := tr.connPool().GetClientConn(req, addr) - if err != nil { - t.Fatalf("GetClientConn: %v", err) - } - cc.mu.Lock() - peerSize := cc.peerMaxHeaderListSize - cc.mu.Unlock() - st.scMu.Lock() - wantSize := uint64(st.sc.maxHeaderListSize()) - st.scMu.Unlock() - if peerSize != wantSize { - t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize) - } - - // Sanity check peerSize. (*serverConn) maxHeaderListSize adds - // 320 bytes of padding. - wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320 - if peerSize != wantHeaderBytes { - t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes) - } - - // Pad headers & trailers, but stay under peerSize. - req = newRequest() - req.Header = make(http.Header) - req.Trailer = make(http.Header) - filler := strings.Repeat("*", 1024) - padHeaders(t, req.Trailer, peerSize, filler) - // cc.encodeHeaders adds some default headers to the request, - // so we need to leave room for those. 
- defaultBytes := headerListSizeForRequest(req) - padHeaders(t, req.Header, peerSize-defaultBytes, filler) - checkRoundTrip(req, nil, "Headers & Trailers under limit") - - // Add enough header bytes to push us over peerSize. - req = newRequest() - req.Header = make(http.Header) - padHeaders(t, req.Header, peerSize, filler) - checkRoundTrip(req, errRequestHeaderListSize, "Headers over limit") - - // Push trailers over the limit. - req = newRequest() - req.Trailer = make(http.Header) - padHeaders(t, req.Trailer, peerSize+1, filler) - checkRoundTrip(req, errRequestHeaderListSize, "Trailers over limit") - - // Send headers with a single large value. - req = newRequest() - filler = strings.Repeat("*", int(peerSize)) - req.Header = make(http.Header) - req.Header.Set("Big", filler) - checkRoundTrip(req, errRequestHeaderListSize, "Single large header") - - // Send trailers with a single large value. - req = newRequest() - req.Trailer = make(http.Header) - req.Trailer.Set("Big", filler) - checkRoundTrip(req, errRequestHeaderListSize, "Single large trailer") -} - -func TestTransportChecksResponseHeaderListSize(t *testing.T) { - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != errResponseHeaderListSize { - if res != nil { - res.Body.Close() - } - size := int64(0) - for k, vv := range res.Header { - for _, v := range vv { - size += int64(len(k)) + int64(len(v)) + 32 - } - } - return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size) - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - large := strings.Repeat("a", 1<<10) - for i := 0; i < 5042; i++ { - enc.WriteField(hpack.HeaderField{Name: large, Value: large}) - } - if size, want := buf.Len(), 6329; size != want { - // Note: this number might change if - // our hpack implementation - // changes. That's fine. This is - // just a sanity check that our - // response can fit in a single - // header block fragment frame. - return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want) - } - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - return nil - } - } - } - ct.run() -} - -// Test that the the Transport returns a typed error from Response.Body.Read calls -// when the server sends an error. 
(here we use a panic, since that should generate
-// a stream error, but others like cancel should be similar)
-func TestTransportBodyReadErrorType(t *testing.T) {
-	doPanic := make(chan bool, 1)
-	st := newServerTester(t,
-		func(w http.ResponseWriter, r *http.Request) {
-			w.(http.Flusher).Flush() // force headers out
-			<-doPanic
-			panic("boom")
-		},
-		optOnlyServer,
-		optQuiet,
-	)
-	defer st.Close()
-
-	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
-	defer tr.CloseIdleConnections()
-	c := &http.Client{Transport: tr}
-
-	res, err := c.Get(st.ts.URL)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer res.Body.Close()
-	doPanic <- true
-	buf := make([]byte, 100)
-	n, err := res.Body.Read(buf)
-	want := StreamError{StreamID: 0x1, Code: 0x2}
-	if !reflect.DeepEqual(want, err) {
-		t.Errorf("Read = %v, %#v; want error %#v", n, err, want)
-	}
-}
-
-// golang.org/issue/13924
-// This used to fail after many iterations, especially with -race:
-// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race
-func TestTransportDoubleCloseOnWriteError(t *testing.T) {
-	var (
-		mu   sync.Mutex
-		conn net.Conn // to close if set
-	)
-
-	st := newServerTester(t,
-		func(w http.ResponseWriter, r *http.Request) {
-			mu.Lock()
-			defer mu.Unlock()
-			if conn != nil {
-				conn.Close()
-			}
-		},
-		optOnlyServer,
-	)
-	defer st.Close()
-
-	tr := &Transport{
-		TLSClientConfig: tlsConfigInsecure,
-		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
-			tc, err := tls.Dial(network, addr, cfg)
-			if err != nil {
-				return nil, err
-			}
-			mu.Lock()
-			defer mu.Unlock()
-			conn = tc
-			return tc, nil
-		},
-	}
-	defer tr.CloseIdleConnections()
-	c := &http.Client{Transport: tr}
-	c.Get(st.ts.URL)
-}
-
-// Test that the http1 Transport.DisableKeepAlives option is respected
-// and connections are closed as soon as idle.
-// See golang.org/issue/14008
-func TestTransportDisableKeepAlives(t *testing.T) {
-	st := newServerTester(t,
-		func(w http.ResponseWriter, r *http.Request) {
-			io.WriteString(w, "hi")
-		},
-		optOnlyServer,
-	)
-	defer st.Close()
-
-	connClosed := make(chan struct{}) // closed on tls.Conn.Close
-	tr := &Transport{
-		t1: &http.Transport{
-			DisableKeepAlives: true,
-		},
-		TLSClientConfig: tlsConfigInsecure,
-		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
-			tc, err := tls.Dial(network, addr, cfg)
-			if err != nil {
-				return nil, err
-			}
-			return &noteCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil
-		},
-	}
-	c := &http.Client{Transport: tr}
-	res, err := c.Get(st.ts.URL)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := ioutil.ReadAll(res.Body); err != nil {
-		t.Fatal(err)
-	}
-	defer res.Body.Close()
-
-	select {
-	case <-connClosed:
-	case <-time.After(1 * time.Second):
-		t.Errorf("timeout")
-	}
-
-}
-
-// Test concurrent requests with Transport.DisableKeepAlives. We can share connections,
-// but when things are totally idle, it still needs to close.
-func TestTransportDisableKeepAlives_Concurrency(t *testing.T) {
-	const D = 25 * time.Millisecond
-	st := newServerTester(t,
-		func(w http.ResponseWriter, r *http.Request) {
-			time.Sleep(D)
-			io.WriteString(w, "hi")
-		},
-		optOnlyServer,
-	)
-	defer st.Close()
-
-	var dials int32
-	var conns sync.WaitGroup
-	tr := &Transport{
-		t1: &http.Transport{
-			DisableKeepAlives: true,
-		},
-		TLSClientConfig: tlsConfigInsecure,
-		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
-			tc, err := tls.Dial(network, addr, cfg)
-			if err != nil {
-				return nil, err
-			}
-			atomic.AddInt32(&dials, 1)
-			conns.Add(1)
-			return &noteCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil
-		},
-	}
-	c := &http.Client{Transport: tr}
-	var reqs sync.WaitGroup
-	const N = 20
-	for i := 0; i < N; i++ {
-		reqs.Add(1)
-		if i == N-1 {
-			// For the final request, try to make all the
-			// others close. This isn't verified in the
-			// count, other than the Log statement, since
-			// it's so timing dependent. This test is
-			// really to make sure we don't interrupt a
-			// valid request.
-			time.Sleep(D * 2)
-		}
-		go func() {
-			defer reqs.Done()
-			res, err := c.Get(st.ts.URL)
-			if err != nil {
-				t.Error(err)
-				return
-			}
-			if _, err := ioutil.ReadAll(res.Body); err != nil {
-				t.Error(err)
-				return
-			}
-			res.Body.Close()
-		}()
-	}
-	reqs.Wait()
-	conns.Wait()
-	t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N)
-}
-
-type noteCloseConn struct {
-	net.Conn
-	onceClose sync.Once
-	closefn   func()
-}
-
-func (c *noteCloseConn) Close() error {
-	c.onceClose.Do(c.closefn)
-	return c.Conn.Close()
-}
-
-func isTimeout(err error) bool {
-	switch err := err.(type) {
-	case nil:
-		return false
-	case *url.Error:
-		return isTimeout(err.Err)
-	case net.Error:
-		return err.Timeout()
-	}
-	return false
-}
-
-// Test that the http1 Transport.ResponseHeaderTimeout option and cancel is sent.
-func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) { - testTransportResponseHeaderTimeout(t, false) -} -func TestTransportResponseHeaderTimeout_Body(t *testing.T) { - testTransportResponseHeaderTimeout(t, true) -} - -func testTransportResponseHeaderTimeout(t *testing.T, body bool) { - ct := newClientTester(t) - ct.tr.t1 = &http.Transport{ - ResponseHeaderTimeout: 5 * time.Millisecond, - } - ct.client = func() error { - c := &http.Client{Transport: ct.tr} - var err error - var n int64 - const bodySize = 4 << 20 - if body { - _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize)) - } else { - _, err = c.Get("https://dummy.tld/") - } - if !isTimeout(err) { - t.Errorf("client expected timeout error; got %#v", err) - } - if body && n != bodySize { - t.Errorf("only read %d bytes of body; want %d", n, bodySize) - } - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - t.Logf("ReadFrame: %v", err) - return nil - } - switch f := f.(type) { - case *DataFrame: - dataLen := len(f.Data()) - if dataLen > 0 { - if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { - return err - } - if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { - return err - } - } - case *RSTStreamFrame: - if f.StreamID == 1 && f.ErrCode == ErrCodeCancel { - return nil - } - } - } - } - ct.run() -} - -func TestTransportDisableCompression(t *testing.T) { - const body = "sup" - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - want := http.Header{ - "User-Agent": []string{"Go-http-client/2.0"}, - } - if !reflect.DeepEqual(r.Header, want) { - t.Errorf("request headers = %v; want %v", r.Header, want) - } - }, optOnlyServer) - defer st.Close() - - tr := &Transport{ - TLSClientConfig: tlsConfigInsecure, - t1: &http.Transport{ - DisableCompression: true, - }, - } - defer tr.CloseIdleConnections() - - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal(err) - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() -} - -// RFC 7540 section 8.1.2.2 -func TestTransportRejectsConnHeaders(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - var got []string - for k := range r.Header { - got = append(got, k) - } - sort.Strings(got) - w.Header().Set("Got-Header", strings.Join(got, ",")) - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - tests := []struct { - key string - value []string - want string - }{ - { - key: "Upgrade", - value: []string{"anything"}, - want: "ERROR: http2: invalid Upgrade request header: [\"anything\"]", - }, - { - key: "Connection", - value: []string{"foo"}, - want: "ERROR: http2: invalid Connection request header: [\"foo\"]", - }, - { - key: "Connection", - value: []string{"close"}, - want: "Accept-Encoding,User-Agent", - }, - { - key: "Connection", - value: []string{"close", "something-else"}, - want: "ERROR: http2: invalid Connection request header: [\"close\" \"something-else\"]", - }, - { - key: "Connection", - value: []string{"keep-alive"}, - want: "Accept-Encoding,User-Agent", - }, - { - key: "Proxy-Connection", // just deleted and ignored - value: []string{"keep-alive"}, - want: "Accept-Encoding,User-Agent", - }, - { - key: "Transfer-Encoding", - value: []string{""}, - want: "Accept-Encoding,User-Agent", - }, - { - key: "Transfer-Encoding", - value: 
[]string{"foo"}, - want: "ERROR: http2: invalid Transfer-Encoding request header: [\"foo\"]", - }, - { - key: "Transfer-Encoding", - value: []string{"chunked"}, - want: "Accept-Encoding,User-Agent", - }, - { - key: "Transfer-Encoding", - value: []string{"chunked", "other"}, - want: "ERROR: http2: invalid Transfer-Encoding request header: [\"chunked\" \"other\"]", - }, - { - key: "Content-Length", - value: []string{"123"}, - want: "Accept-Encoding,User-Agent", - }, - { - key: "Keep-Alive", - value: []string{"doop"}, - want: "Accept-Encoding,User-Agent", - }, - } - - for _, tt := range tests { - req, _ := http.NewRequest("GET", st.ts.URL, nil) - req.Header[tt.key] = tt.value - res, err := tr.RoundTrip(req) - var got string - if err != nil { - got = fmt.Sprintf("ERROR: %v", err) - } else { - got = res.Header.Get("Got-Header") - res.Body.Close() - } - if got != tt.want { - t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want) - } - } -} - -// golang.org/issue/14048 -func TestTransportFailsOnInvalidHeaders(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - var got []string - for k := range r.Header { - got = append(got, k) - } - sort.Strings(got) - w.Header().Set("Got-Header", strings.Join(got, ",")) - }, optOnlyServer) - defer st.Close() - - tests := [...]struct { - h http.Header - wantErr string - }{ - 0: { - h: http.Header{"with space": {"foo"}}, - wantErr: `invalid HTTP header name "with space"`, - }, - 1: { - h: http.Header{"name": {"БрÑд"}}, - wantErr: "", // okay - }, - 2: { - h: http.Header{"имÑ": {"Brad"}}, - wantErr: `invalid HTTP header name "имÑ"`, - }, - 3: { - h: http.Header{"foo": {"foo\x01bar"}}, - wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`, - }, - } - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - for i, tt := range tests { - req, _ := http.NewRequest("GET", st.ts.URL, nil) - req.Header = tt.h - res, err := tr.RoundTrip(req) - var bad bool - if tt.wantErr == "" { - if err != nil { - bad = true - t.Errorf("case %d: error = %v; want no error", i, err) - } - } else { - if !strings.Contains(fmt.Sprint(err), tt.wantErr) { - bad = true - t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr) - } - } - if err == nil { - if bad { - t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header")) - } - res.Body.Close() - } - } -} - -// Tests that gzipReader doesn't crash on a second Read call following -// the first Read call's gzip.NewReader returning an error. -func TestGzipReader_DoubleReadCrash(t *testing.T) { - gz := &gzipReader{ - body: ioutil.NopCloser(strings.NewReader("0123456789")), - } - var buf [1]byte - n, err1 := gz.Read(buf[:]) - if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { - t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) - } - n, err2 := gz.Read(buf[:]) - if n != 0 || err2 != err1 { - t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) - } -} - -func TestTransportNewTLSConfig(t *testing.T) { - tests := [...]struct { - conf *tls.Config - host string - want *tls.Config - }{ - // Normal case. 
- 0: { - conf: nil, - host: "foo.com", - want: &tls.Config{ - ServerName: "foo.com", - NextProtos: []string{NextProtoTLS}, - }, - }, - - // User-provided name (bar.com) takes precedence: - 1: { - conf: &tls.Config{ - ServerName: "bar.com", - }, - host: "foo.com", - want: &tls.Config{ - ServerName: "bar.com", - NextProtos: []string{NextProtoTLS}, - }, - }, - - // NextProto is prepended: - 2: { - conf: &tls.Config{ - NextProtos: []string{"foo", "bar"}, - }, - host: "example.com", - want: &tls.Config{ - ServerName: "example.com", - NextProtos: []string{NextProtoTLS, "foo", "bar"}, - }, - }, - - // NextProto is not duplicated: - 3: { - conf: &tls.Config{ - NextProtos: []string{"foo", "bar", NextProtoTLS}, - }, - host: "example.com", - want: &tls.Config{ - ServerName: "example.com", - NextProtos: []string{"foo", "bar", NextProtoTLS}, - }, - }, - } - for i, tt := range tests { - // Ignore the session ticket keys part, which ends up populating - // unexported fields in the Config: - if tt.conf != nil { - tt.conf.SessionTicketsDisabled = true - } - - tr := &Transport{TLSClientConfig: tt.conf} - got := tr.newTLSConfig(tt.host) - - got.SessionTicketsDisabled = false - - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%d. got %#v; want %#v", i, got, tt.want) - } - } -} - -// The Google GFE responds to HEAD requests with a HEADERS frame -// without END_STREAM, followed by a 0-length DATA frame with -// END_STREAM. Make sure we don't get confused by that. (We did.) -func TestTransportReadHeadResponse(t *testing.T) { - ct := newClientTester(t) - clientDone := make(chan struct{}) - ct.client = func() error { - defer close(clientDone) - req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err - } - if res.ContentLength != 123 { - return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("ReadAll: %v", err) - } - if len(slurp) > 0 { - return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) - } - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - t.Logf("ReadFrame: %v", err) - return nil - } - hf, ok := f.(*HeadersFrame) - if !ok { - continue - } - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, // as the GFE does - BlockFragment: buf.Bytes(), - }) - ct.fr.WriteData(hf.StreamID, true, nil) - - <-clientDone - return nil - } - } - ct.run() -} - -type neverEnding byte - -func (b neverEnding) Read(p []byte) (int, error) { - for i := range p { - p[i] = byte(b) - } - return len(p), nil -} - -// golang.org/issue/15425: test that a handler closing the request -// body doesn't terminate the stream to the peer. 
(It just stops -// readability from the handler's side, and eventually the client -// runs out of flow control tokens) -func TestTransportHandlerBodyClose(t *testing.T) { - const bodySize = 10 << 20 - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - r.Body.Close() - io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - g0 := runtime.NumGoroutine() - - const numReq = 10 - for i := 0; i < numReq; i++ { - req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) - if err != nil { - t.Fatal(err) - } - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - if n != bodySize || err != nil { - t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) - } - } - tr.CloseIdleConnections() - - gd := runtime.NumGoroutine() - g0 - if gd > numReq/2 { - t.Errorf("appeared to leak goroutines") - } - -} - -// https://golang.org/issue/15930 -func TestTransportFlowControl(t *testing.T) { - const bufLen = 64 << 10 - var total int64 = 100 << 20 // 100MB - if testing.Short() { - total = 10 << 20 - } - - var wrote int64 // updated atomically - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - b := make([]byte, bufLen) - for wrote < total { - n, err := w.Write(b) - atomic.AddInt64(&wrote, int64(n)) - if err != nil { - t.Errorf("ResponseWriter.Write error: %v", err) - break - } - w.(http.Flusher).Flush() - } - }, optOnlyServer) - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - t.Fatal("NewRequest error:", err) - } - resp, err := tr.RoundTrip(req) - if err != nil { - t.Fatal("RoundTrip error:", err) - } - defer resp.Body.Close() - - var read int64 - b := make([]byte, bufLen) - for { - n, err := resp.Body.Read(b) - if err == io.EOF { - break - } - if err != nil { - t.Fatal("Read error:", err) - } - read += int64(n) - - const max = transportDefaultStreamFlow - if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max { - t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read) - } - - // Let the server get ahead of the client. - time.Sleep(1 * time.Millisecond) - } -} - -// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make -// the Transport remember it and return it back to users (via -// RoundTrip or request body reads) if needed (e.g. 
if the server -// proceeds to close the TCP connection before the client gets its -// response) -func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) { - testTransportUsesGoAwayDebugError(t, false) -} - -func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { - testTransportUsesGoAwayDebugError(t, true) -} - -func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { - ct := newClientTester(t) - clientDone := make(chan struct{}) - - const goAwayErrCode = ErrCodeHTTP11Required // arbitrary - const goAwayDebugData = "some debug data" - - ct.client = func() error { - defer close(clientDone) - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if failMidBody { - if err != nil { - return fmt.Errorf("unexpected client RoundTrip error: %v", err) - } - _, err = io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - } - want := GoAwayError{ - LastStreamID: 5, - ErrCode: goAwayErrCode, - DebugData: goAwayDebugData, - } - if !reflect.DeepEqual(err, want) { - t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want) - } - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - t.Logf("ReadFrame: %v", err) - return nil - } - hf, ok := f.(*HeadersFrame) - if !ok { - continue - } - if failMidBody { - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - } - // Write two GOAWAY frames, to test that the Transport takes - // the interesting parts of both. - ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) - ct.fr.WriteGoAway(5, goAwayErrCode, nil) - ct.sc.(*net.TCPConn).CloseWrite() - <-clientDone - return nil - } - } - ct.run() -} - -func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) { - ct := newClientTester(t) - - clientClosed := make(chan struct{}) - serverWroteFirstByte := make(chan struct{}) - - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err - } - <-serverWroteFirstByte - - if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 { - return fmt.Errorf("body read = %v, %v; want 1, nil", n, err) - } - res.Body.Close() // leaving 4999 bytes unread - close(clientClosed) - - return nil - } - ct.server = func() error { - ct.greet() - - var hf *HeadersFrame - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - continue - } - var ok bool - hf, ok = f.(*HeadersFrame) - if !ok { - return fmt.Errorf("Got %T; want HeadersFrame", f) - } - break - } - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - - // Two cases: - // - Send one DATA frame with 5000 bytes. - // - Send two DATA frames with 1 and 4999 bytes each. 
- // - // In both cases, the client should consume one byte of data, - // refund that byte, then refund the following 4999 bytes. - // - // In the second case, the server waits for the client connection to - // close before seconding the second DATA frame. This tests the case - // where the client receives a DATA frame after it has reset the stream. - if oneDataFrame { - ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000)) - close(serverWroteFirstByte) - <-clientClosed - } else { - ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1)) - close(serverWroteFirstByte) - <-clientClosed - ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999)) - } - - waitingFor := "RSTStreamFrame" - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err) - } - if _, ok := f.(*SettingsFrame); ok { - continue - } - switch waitingFor { - case "RSTStreamFrame": - if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel { - return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f)) - } - waitingFor = "WindowUpdateFrame" - case "WindowUpdateFrame": - if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 { - return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f)) - } - return nil - } - } - } - ct.run() -} - -// See golang.org/issue/16481 -func TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) { - testTransportReturnsUnusedFlowControl(t, true) -} - -// See golang.org/issue/20469 -func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) { - testTransportReturnsUnusedFlowControl(t, false) -} - -// Issue 16612: adjust flow control on open streams when transport -// receives SETTINGS with INITIAL_WINDOW_SIZE from server. -func TestTransportAdjustsFlowControl(t *testing.T) { - ct := newClientTester(t) - clientDone := make(chan struct{}) - - const bodySize = 1 << 20 - - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - defer close(clientDone) - - req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err - } - res.Body.Close() - return nil - } - ct.server = func() error { - _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface))) - if err != nil { - return fmt.Errorf("reading client preface: %v", err) - } - - var gotBytes int64 - var sentSettings bool - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - return nil - default: - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - } - switch f := f.(type) { - case *DataFrame: - gotBytes += int64(len(f.Data())) - // After we've got half the client's - // initial flow control window's worth - // of request body data, give it just - // enough flow control to finish. 
- if gotBytes >= initialWindowSize/2 && !sentSettings { - sentSettings = true - - ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) - ct.fr.WriteWindowUpdate(0, bodySize) - ct.fr.WriteSettingsAck() - } - - if f.StreamEnded() { - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - } - } - } - } - ct.run() -} - -// See golang.org/issue/16556 -func TestTransportReturnsDataPaddingFlowControl(t *testing.T) { - ct := newClientTester(t) - - unblockClient := make(chan bool, 1) - - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err - } - defer res.Body.Close() - <-unblockClient - return nil - } - ct.server = func() error { - ct.greet() - - var hf *HeadersFrame - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - continue - } - var ok bool - hf, ok = f.(*HeadersFrame) - if !ok { - return fmt.Errorf("Got %T; want HeadersFrame", f) - } - break - } - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - pad := make([]byte, 5) - ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream - - f, err := ct.readNonSettingsFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err) - } - wantBack := uint32(len(pad)) + 1 // one byte for the length of the padding - if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 { - return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) - } - - f, err = ct.readNonSettingsFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err) - } - if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID == 0 { - return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) - } - unblockClient <- true - return nil - } - ct.run() -} - -// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a -// StreamError as a result of the response HEADERS -func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) { - ct := newClientTester(t) - - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err == nil { - res.Body.Close() - return errors.New("unexpected successful GET") - } - want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} - if !reflect.DeepEqual(want, err) { - t.Errorf("RoundTrip error = %#v; want %#v", err, want) - } - return nil - } - ct.server = func() error { - ct.greet() - - hf, err := ct.firstHeaders() - if err != nil { - return err - } - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: " content-type", Value: 
"bogus"}) // bogus spaces - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - - for { - fr, err := ct.readFrame() - if err != nil { - return fmt.Errorf("error waiting for RST_STREAM from client: %v", err) - } - if _, ok := fr.(*SettingsFrame); ok { - continue - } - if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol { - t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) - } - break - } - - return nil - } - ct.run() -} - -// byteAndEOFReader returns is in an io.Reader which reads one byte -// (the underlying byte) and io.EOF at once in its Read call. -type byteAndEOFReader byte - -func (b byteAndEOFReader) Read(p []byte) (n int, err error) { - if len(p) == 0 { - panic("unexpected useless call") - } - p[0] = byte(b) - return 1, io.EOF -} - -// Issue 16788: the Transport had a regression where it started -// sending a spurious DATA frame with a duplicate END_STREAM bit after -// the request body writer goroutine had already read an EOF from the -// Request.Body and included the END_STREAM on a data-carrying DATA -// frame. -// -// Notably, to trigger this, the requests need to use a Request.Body -// which returns (non-0, io.EOF) and also needs to set the ContentLength -// explicitly. -func TestTransportBodyDoubleEndStream(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - // Nothing. - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - for i := 0; i < 2; i++ { - req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a')) - req.ContentLength = 1 - res, err := tr.RoundTrip(req) - if err != nil { - t.Fatalf("failure on req %d: %v", i+1, err) - } - defer res.Body.Close() - } -} - -// golang.org/issue/16847, golang.org/issue/19103 -func TestTransportRequestPathPseudo(t *testing.T) { - type result struct { - path string - err string - } - tests := []struct { - req *http.Request - want result - }{ - 0: { - req: &http.Request{ - Method: "GET", - URL: &url.URL{ - Host: "foo.com", - Path: "/foo", - }, - }, - want: result{path: "/foo"}, - }, - // In Go 1.7, we accepted paths of "//foo". - // In Go 1.8, we rejected it (issue 16847). - // In Go 1.9, we accepted it again (issue 19103). 
- 1: { - req: &http.Request{ - Method: "GET", - URL: &url.URL{ - Host: "foo.com", - Path: "//foo", - }, - }, - want: result{path: "//foo"}, - }, - - // Opaque with //$Matching_Hostname/path - 2: { - req: &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "https", - Opaque: "//foo.com/path", - Host: "foo.com", - Path: "/ignored", - }, - }, - want: result{path: "/path"}, - }, - - // Opaque with some other Request.Host instead: - 3: { - req: &http.Request{ - Method: "GET", - Host: "bar.com", - URL: &url.URL{ - Scheme: "https", - Opaque: "//bar.com/path", - Host: "foo.com", - Path: "/ignored", - }, - }, - want: result{path: "/path"}, - }, - - // Opaque without the leading "//": - 4: { - req: &http.Request{ - Method: "GET", - URL: &url.URL{ - Opaque: "/path", - Host: "foo.com", - Path: "/ignored", - }, - }, - want: result{path: "/path"}, - }, - - // Opaque we can't handle: - 5: { - req: &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "https", - Opaque: "//unknown_host/path", - Host: "foo.com", - Path: "/ignored", - }, - }, - want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`}, - }, - - // A CONNECT request: - 6: { - req: &http.Request{ - Method: "CONNECT", - URL: &url.URL{ - Host: "foo.com", - }, - }, - want: result{}, - }, - } - for i, tt := range tests { - cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} - cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.mu.Lock() - hdrs, err := cc.encodeHeaders(tt.req, false, "", -1) - cc.mu.Unlock() - var got result - hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { - if f.Name == ":path" { - got.path = f.Value - } - }) - if err != nil { - got.err = err.Error() - } else if len(hdrs) > 0 { - if _, err := hpackDec.Write(hdrs); err != nil { - t.Errorf("%d. bogus hpack: %v", i, err) - continue - } - } - if got != tt.want { - t.Errorf("%d. got %+v; want %+v", i, got, tt.want) - } - - } - -} - -// golang.org/issue/17071 -- don't sniff the first byte of the request body -// before we've determined that the ClientConn is usable. -func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { - const body = "foo" - req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body))) - cc := &ClientConn{ - closed: true, - } - _, err := cc.RoundTrip(req) - if err != errClientConnUnusable { - t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err) - } - slurp, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("ReadAll = %v", err) - } - if string(slurp) != body { - t.Errorf("Body = %q; want %q", slurp, body) - } -} - -func TestClientConnPing(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) - defer st.Close() - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - cc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false) - if err != nil { - t.Fatal(err) - } - if err = cc.Ping(testContext{}); err != nil { - t.Fatal(err) - } -} - -// Issue 16974: if the server sent a DATA frame after the user -// canceled the Transport's Request, the Transport previously wrote to a -// closed pipe, got an error, and ended up closing the whole TCP -// connection. -func TestTransportCancelDataResponseRace(t *testing.T) { - cancel := make(chan struct{}) - clientGotError := make(chan bool, 1) - - const msg = "Hello." 
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - if strings.Contains(r.URL.Path, "/hello") { - time.Sleep(50 * time.Millisecond) - io.WriteString(w, msg) - return - } - for i := 0; i < 50; i++ { - io.WriteString(w, "Some data.") - w.(http.Flusher).Flush() - if i == 2 { - close(cancel) - <-clientGotError - } - time.Sleep(10 * time.Millisecond) - } - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - c := &http.Client{Transport: tr} - req, _ := http.NewRequest("GET", st.ts.URL, nil) - req.Cancel = cancel - res, err := c.Do(req) - if err != nil { - t.Fatal(err) - } - if _, err = io.Copy(ioutil.Discard, res.Body); err == nil { - t.Fatal("unexpected success") - } - clientGotError <- true - - res, err = c.Get(st.ts.URL + "/hello") - if err != nil { - t.Fatal(err) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(slurp) != msg { - t.Errorf("Got = %q; want %q", slurp, msg) - } -} - -func TestTransportRetryAfterGOAWAY(t *testing.T) { - var dialer struct { - sync.Mutex - count int - } - ct1 := make(chan *clientTester) - ct2 := make(chan *clientTester) - - ln := newLocalListener(t) - defer ln.Close() - - tr := &Transport{ - TLSClientConfig: tlsConfigInsecure, - } - tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { - dialer.Lock() - defer dialer.Unlock() - dialer.count++ - if dialer.count == 3 { - return nil, errors.New("unexpected number of dials") - } - cc, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - return nil, fmt.Errorf("dial error: %v", err) - } - sc, err := ln.Accept() - if err != nil { - return nil, fmt.Errorf("accept error: %v", err) - } - ct := &clientTester{ - t: t, - tr: tr, - cc: cc, - sc: sc, - fr: NewFramer(sc, sc), - } - switch dialer.count { - case 1: - ct1 <- ct - case 2: - ct2 <- ct - } - return cc, nil - } - - errs := make(chan error, 3) - done := make(chan struct{}) - defer close(done) - - // Client. - go func() { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := tr.RoundTrip(req) - if res != nil { - res.Body.Close() - if got := res.Header.Get("Foo"); got != "bar" { - err = fmt.Errorf("foo header = %q; want bar", got) - } - } - if err != nil { - err = fmt.Errorf("RoundTrip: %v", err) - } - errs <- err - }() - - connToClose := make(chan io.Closer, 2) - - // Server for the first request. - go func() { - var ct *clientTester - select { - case ct = <-ct1: - case <-done: - return - } - - connToClose <- ct.cc - ct.greet() - hf, err := ct.firstHeaders() - if err != nil { - errs <- fmt.Errorf("server1 failed reading HEADERS: %v", err) - return - } - t.Logf("server1 got %v", hf) - if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil { - errs <- fmt.Errorf("server1 failed writing GOAWAY: %v", err) - return - } - errs <- nil - }() - - // Server for the second request. 
- go func() { - var ct *clientTester - select { - case ct = <-ct2: - case <-done: - return - } - - connToClose <- ct.cc - ct.greet() - hf, err := ct.firstHeaders() - if err != nil { - errs <- fmt.Errorf("server2 failed reading HEADERS: %v", err) - return - } - t.Logf("server2 got %v", hf) - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) - err = ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - if err != nil { - errs <- fmt.Errorf("server2 failed writing response HEADERS: %v", err) - } else { - errs <- nil - } - }() - - for k := 0; k < 3; k++ { - select { - case err := <-errs: - if err != nil { - t.Error(err) - } - case <-time.After(1 * time.Second): - t.Errorf("timed out") - } - } - - for { - select { - case c := <-connToClose: - c.Close() - default: - return - } - } -} - -func TestTransportRetryAfterRefusedStream(t *testing.T) { - clientDone := make(chan struct{}) - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - defer close(clientDone) - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - resp, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - resp.Body.Close() - if resp.StatusCode != 204 { - return fmt.Errorf("Status = %v; want 204", resp.StatusCode) - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - nreq := 0 - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it - // will have reported any - // errors on its side. - return nil - default: - return err - } - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - nreq++ - if nreq == 1 { - ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) - } else { - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - } - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } - } - ct.run() -} - -func TestTransportRetryHasLimit(t *testing.T) { - // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s. - if testing.Short() { - t.Skip("skipping long test in short mode") - } - clientDone := make(chan struct{}) - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - defer close(clientDone) - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - resp, err := ct.tr.RoundTrip(req) - if err == nil { - return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) - } - t.Logf("expected error, got: %v", err) - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it - // will have reported any - // errors on its side. 
- return nil - default: - return err - } - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } - } - ct.run() -} - -func TestTransportResponseDataBeforeHeaders(t *testing.T) { - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - req := httptest.NewRequest("GET", "https://dummy.tld/", nil) - // First request is normal to ensure the check is per stream and not per connection. - _, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip expected no error, got: %v", err) - } - // Second request returns a DATA frame with no HEADERS. - resp, err := ct.tr.RoundTrip(req) - if err == nil { - return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) - } - if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol { - return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err) - } - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err == io.EOF { - return nil - } else if err != nil { - return err - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - switch f.StreamID { - case 1: - // Send a valid response to first request. - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - case 3: - ct.fr.WriteData(f.StreamID, true, []byte("payload")) - } - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } - } - ct.run() -} -func TestTransportRequestsStallAtServerLimit(t *testing.T) { - const maxConcurrent = 2 - - greet := make(chan struct{}) // server sends initial SETTINGS frame - gotRequest := make(chan struct{}) // server received a request - clientDone := make(chan struct{}) - - // Collect errors from goroutines. - var wg sync.WaitGroup - errs := make(chan error, 100) - defer func() { - wg.Wait() - close(errs) - for err := range errs { - t.Error(err) - } - }() - - // We will send maxConcurrent+2 requests. This checker goroutine waits for the - // following stages: - // 1. The first maxConcurrent requests are received by the server. - // 2. The client will cancel the next request - // 3. The server is unblocked so it can service the first maxConcurrent requests - // 4. The client will send the final request - wg.Add(1) - unblockClient := make(chan struct{}) - clientRequestCancelled := make(chan struct{}) - unblockServer := make(chan struct{}) - go func() { - defer wg.Done() - // Stage 1. - for k := 0; k < maxConcurrent; k++ { - <-gotRequest - } - // Stage 2. - close(unblockClient) - <-clientRequestCancelled - // Stage 3: give some time for the final RoundTrip call to be scheduled and - // verify that the final request is not sent. - time.Sleep(50 * time.Millisecond) - select { - case <-gotRequest: - errs <- errors.New("last request did not stall") - close(unblockServer) - return - default: - } - close(unblockServer) - // Stage 4. 
- <-gotRequest - }() - - ct := newClientTester(t) - ct.client = func() error { - var wg sync.WaitGroup - defer func() { - wg.Wait() - close(clientDone) - ct.cc.(*net.TCPConn).CloseWrite() - }() - for k := 0; k < maxConcurrent+2; k++ { - wg.Add(1) - go func(k int) { - defer wg.Done() - // Don't send the second request until after receiving SETTINGS from the server - // to avoid a race where we use the default SettingMaxConcurrentStreams, which - // is much larger than maxConcurrent. We have to send the first request before - // waiting because the first request triggers the dial and greet. - if k > 0 { - <-greet - } - // Block until maxConcurrent requests are sent before sending any more. - if k >= maxConcurrent { - <-unblockClient - } - req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil) - if k == maxConcurrent { - // This request will be canceled. - cancel := make(chan struct{}) - req.Cancel = cancel - close(cancel) - _, err := ct.tr.RoundTrip(req) - close(clientRequestCancelled) - if err == nil { - errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k) - return - } - } else { - resp, err := ct.tr.RoundTrip(req) - if err != nil { - errs <- fmt.Errorf("RoundTrip(%d): %v", k, err) - return - } - ioutil.ReadAll(resp.Body) - resp.Body.Close() - if resp.StatusCode != 204 { - errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode) - return - } - } - }(k) - } - return nil - } - - ct.server = func() error { - var wg sync.WaitGroup - defer wg.Wait() - - ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) - - // Server write loop. - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - writeResp := make(chan uint32, maxConcurrent+1) - - wg.Add(1) - go func() { - defer wg.Done() - <-unblockServer - for id := range writeResp { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: id, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - } - }() - - // Server read loop. - var nreq int - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it will have reported any errors on its side. - return nil - default: - return err - } - } - switch f := f.(type) { - case *WindowUpdateFrame: - case *SettingsFrame: - // Wait for the client SETTINGS ack until ending the greet. - close(greet) - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - gotRequest <- struct{}{} - nreq++ - writeResp <- f.StreamID - if nreq == maxConcurrent+1 { - close(writeResp) - } - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } - } - - ct.run() -} - -func TestAuthorityAddr(t *testing.T) { - tests := []struct { - scheme, authority string - want string - }{ - {"http", "foo.com", "foo.com:80"}, - {"https", "foo.com", "foo.com:443"}, - {"https", "foo.com:1234", "foo.com:1234"}, - {"https", "1.2.3.4:1234", "1.2.3.4:1234"}, - {"https", "1.2.3.4", "1.2.3.4:443"}, - {"https", "[::1]:1234", "[::1]:1234"}, - {"https", "[::1]", "[::1]:443"}, - } - for _, tt := range tests { - got := authorityAddr(tt.scheme, tt.authority) - if got != tt.want { - t.Errorf("authorityAddr(%q, %q) = %q; want %q", tt.scheme, tt.authority, got, tt.want) - } - } -} - -// Issue 20448: stop allocating for DATA frames' payload after -// Response.Body.Close is called. 
-func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { - megabyteZero := make([]byte, 1<<20) - - writeErr := make(chan error, 1) - - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - w.(http.Flusher).Flush() - var sum int64 - for i := 0; i < 100; i++ { - n, err := w.Write(megabyteZero) - sum += int64(n) - if err != nil { - writeErr <- err - return - } - } - t.Logf("wrote all %d bytes", sum) - writeErr <- nil - }, optOnlyServer) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - res, err := c.Get(st.ts.URL) - if err != nil { - t.Fatal(err) - } - var buf [1]byte - if _, err := res.Body.Read(buf[:]); err != nil { - t.Error(err) - } - if err := res.Body.Close(); err != nil { - t.Error(err) - } - - trb, ok := res.Body.(transportResponseBody) - if !ok { - t.Fatalf("res.Body = %T; want transportResponseBody", res.Body) - } - if trb.cs.bufPipe.b != nil { - t.Errorf("response body pipe is still open") - } - - gotErr := <-writeErr - if gotErr == nil { - t.Errorf("Handler unexpectedly managed to write its entire response without getting an error") - } else if gotErr != errStreamClosed { - t.Errorf("Handler Write err = %v; want errStreamClosed", gotErr) - } -} - -// Issue 18891: make sure Request.Body == NoBody means no DATA frame -// is ever sent, even if empty. -func TestTransportNoBodyMeansNoDATA(t *testing.T) { - ct := newClientTester(t) - - unblockClient := make(chan bool) - - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", go18httpNoBody()) - ct.tr.RoundTrip(req) - <-unblockClient - return nil - } - ct.server = func() error { - defer close(unblockClient) - defer ct.cc.(*net.TCPConn).Close() - ct.greet() - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f := f.(type) { - default: - return fmt.Errorf("Got %T; want HeadersFrame", f) - case *WindowUpdateFrame, *SettingsFrame: - continue - case *HeadersFrame: - if !f.StreamEnded() { - return fmt.Errorf("got headers frame without END_STREAM") - } - return nil - } - } - } - ct.run() -} - -func benchSimpleRoundTrip(b *testing.B, nHeaders int) { - defer disableGoroutineTracking()() - b.ReportAllocs() - st := newServerTester(b, - func(w http.ResponseWriter, r *http.Request) { - }, - optOnlyServer, - optQuiet, - ) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() - - req, err := http.NewRequest("GET", st.ts.URL, nil) - if err != nil { - b.Fatal(err) - } - - for i := 0; i < nHeaders; i++ { - name := fmt.Sprint("A-", i) - req.Header.Set(name, "*") - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - res, err := tr.RoundTrip(req) - if err != nil { - if res != nil { - res.Body.Close() - } - b.Fatalf("RoundTrip err = %v; want nil", err) - } - res.Body.Close() - if res.StatusCode != http.StatusOK { - b.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) - } - } -} - -func BenchmarkClientRequestHeaders(b *testing.B) { - b.Run(" 0 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 0) }) - b.Run(" 10 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 10) }) - b.Run(" 100 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 100) }) - b.Run("1000 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 1000) }) -} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go deleted file mode 
100644 index 6b0dfae3..00000000 --- a/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "net/url" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error - - // staysWithinBuffer reports whether this writer promises that - // it will only write less than or equal to size bytes, and it - // won't Flush the write context. - staysWithinBuffer(size int) bool -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. -type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// writeEndsStream reports whether w writes a frame that will transition -// the stream to a half-closed local state. This returns false for RST_STREAM, -// which closes the entire stream (not just the local half). -func writeEndsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. - panic("writeEndsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } - -type writeSettings []Setting - -func (s writeSettings) staysWithinBuffer(max int) bool { - const settingSize = 6 // uint16 + uint32 - return frameHeaderLen+settingSize*len(s) <= max - -} - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) 
-} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } - return err -} - -func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -func (w *writeData) staysWithinBuffer(max int) bool { - return frameHeaderLen+len(w.p) <= max -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } - -// splitHeaderBlock splits headerBlock into fragments so that each fragment fits -// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true -// for the first/last fragment, respectively. -func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. - const maxFrameSize = 16384 - - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { - return err - } - first = false - } - return nil -} - -// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers or trailers from a server handler. -type writeResHeaders struct { - streamID uint32 - httpResCode int // 0 means no ":status" line - h http.Header // may be nil - trailers []string // if non-nil, which keys of h to write. nil means all. 
- endStream bool - - date string - contentType string - contentLength string -} - -func encKV(enc *hpack.Encoder, k, v string) { - if VerboseLogs { - log.Printf("http2: server encoding header %q = %q", k, v) - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) -} - -func (w *writeResHeaders) staysWithinBuffer(max int) bool { - // TODO: this is a common one. It'd be nice to return true - // here and get into the fast path if we could be clever and - // calculate the size fast enough, or at least a conservative - // uppper bound that usually fires. (Maybe if w.h and - // w.trailers are nil, so we don't need to enumerate it.) - // Otherwise I'm afraid that just calculating the length to - // answer this question would be slower than the ~2µs benefit. - return false -} - -func (w *writeResHeaders) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - if w.httpResCode != 0 { - encKV(enc, ":status", httpCodeString(w.httpResCode)) - } - - encodeHeaders(enc, w.h, w.trailers) - - if w.contentType != "" { - encKV(enc, "content-type", w.contentType) - } - if w.contentLength != "" { - encKV(enc, "content-length", w.contentLength) - } - if w.date != "" { - encKV(enc, "date", w.date) - } - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 && w.trailers == nil { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. -type writePushPromise struct { - streamID uint32 // pusher stream - method string // for :method - url *url.URL // for :scheme, :authority, :path - h http.Header - - // Creates an ID for a pushed stream. This runs on serveG just before - // the frame is written. The returned ID is copied to promisedID. 
- allocatePromisedID func() (uint32, error) - promisedID uint32 -} - -func (w *writePushPromise) staysWithinBuffer(max int) bool { - // TODO: see writeResHeaders.staysWithinBuffer - return false -} - -func (w *writePushPromise) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - encKV(enc, ":method", w.method) - encKV(enc, ":scheme", w.url.Scheme) - encKV(enc, ":authority", w.url.Host) - encKV(enc, ":path", w.url.RequestURI()) - encodeHeaders(enc, w.h, nil) - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WritePushPromise(PushPromiseParam{ - StreamID: w.streamID, - PromiseID: w.promisedID, - BlockFragment: frag, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -type write100ContinueHeadersFrame struct { - streamID uint32 -} - -func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - encKV(enc, ":status", "100") - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: buf.Bytes(), - EndStream: false, - EndHeaders: true, - }) -} - -func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { - // Sloppy but conservative: - return 9+2*(len(":status")+len("100")) <= max -} - -type writeWindowUpdate struct { - streamID uint32 // or 0 for conn-level - n uint32 -} - -func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) -} - -// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only only if k is in keys. -func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - if keys == nil { - sorter := sorterPool.Get().(*sorter) - // Using defer here, since the returned keys from the - // sorter.Keys method is only valid until the sorter - // is returned: - defer sorterPool.Put(sorter) - keys = sorter.Keys(h) - } - for _, k := range keys { - vv := h[k] - k = lowerHeader(k) - if !validWireHeaderFieldName(k) { - // Skip it as backup paranoia. Per - // golang.org/issue/14048, these should - // already be rejected at a higher level. - continue - } - isTE := k == "transfer-encoding" - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if isTE && v != "trailers" { - continue - } - encKV(enc, k, v) - } - } -} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go deleted file mode 100644 index 4fe30730..00000000 --- a/vendor/golang.org/x/net/http2/writesched.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "fmt" - -// WriteScheduler is the interface implemented by HTTP/2 write schedulers. -// Methods are never called concurrently. -type WriteScheduler interface { - // OpenStream opens a new stream in the write scheduler. 
- // It is illegal to call this with streamID=0 or with a streamID that is - // already open -- the call may panic. - OpenStream(streamID uint32, options OpenStreamOptions) - - // CloseStream closes a stream in the write scheduler. Any frames queued on - // this stream should be discarded. It is illegal to call this on a stream - // that is not open -- the call may panic. - CloseStream(streamID uint32) - - // AdjustStream adjusts the priority of the given stream. This may be called - // on a stream that has not yet been opened or has been closed. Note that - // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: - // https://tools.ietf.org/html/rfc7540#section-5.1 - AdjustStream(streamID uint32, priority PriorityParam) - - // Push queues a frame in the scheduler. In most cases, this will not be - // called with wr.StreamID()!=0 unless that stream is currently open. The one - // exception is RST_STREAM frames, which may be sent on idle or closed streams. - Push(wr FrameWriteRequest) - - // Pop dequeues the next frame to write. Returns false if no frames can - // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. - Pop() (wr FrameWriteRequest, ok bool) -} - -// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. -type OpenStreamOptions struct { - // PusherID is zero if the stream was initiated by the client. Otherwise, - // PusherID names the stream that pushed the newly opened stream. - PusherID uint32 -} - -// FrameWriteRequest is a request to write a frame. -type FrameWriteRequest struct { - // write is the interface value that does the writing, once the - // WriteScheduler has selected this frame to write. The write - // functions are all defined in write.go. - write writeFramer - - // stream is the stream on which this frame will be written. - // nil for non-stream frames like PING and SETTINGS. - stream *stream - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. - done chan error -} - -// StreamID returns the id of the stream this frame will be written to. -// 0 is used for non-stream frames such as PING and SETTINGS. -func (wr FrameWriteRequest) StreamID() uint32 { - if wr.stream == nil { - if se, ok := wr.write.(StreamError); ok { - // (*serverConn).resetStream doesn't set - // stream because it doesn't necessarily have - // one. So special case this type of write - // message. - return se.StreamID - } - return 0 - } - return wr.stream.id -} - -// DataSize returns the number of flow control bytes that must be consumed -// to write this entire frame. This is 0 for non-DATA frames. -func (wr FrameWriteRequest) DataSize() int { - if wd, ok := wr.write.(*writeData); ok { - return len(wd.p) - } - return 0 -} - -// Consume consumes min(n, available) bytes from this frame, where available -// is the number of flow control bytes available on the stream. Consume returns -// 0, 1, or 2 frames, where the integer return value gives the number of frames -// returned. -// -// If flow control prevents consuming any bytes, this returns (_, _, 0). If -// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this -// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and -// 'rest' contains the remaining bytes. The consumed bytes are deducted from the -// underlying stream's flow control budget. 
-func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { - var empty FrameWriteRequest - - // Non-DATA frames are always consumed whole. - wd, ok := wr.write.(*writeData) - if !ok || len(wd.p) == 0 { - return wr, empty, 1 - } - - // Might need to split after applying limits. - allowed := wr.stream.flow.available() - if n < allowed { - allowed = n - } - if wr.stream.sc.maxFrameSize < allowed { - allowed = wr.stream.sc.maxFrameSize - } - if allowed <= 0 { - return empty, empty, 0 - } - if len(wd.p) > int(allowed) { - wr.stream.flow.take(allowed) - consumed := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[:allowed], - // Even if the original had endStream set, there - // are bytes remaining because len(wd.p) > allowed, - // so we know endStream is false. - endStream: false, - }, - // Our caller is blocking on the final DATA frame, not - // this intermediate frame, so no need to wait. - done: nil, - } - rest := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[allowed:], - endStream: wd.endStream, - }, - done: wr.done, - } - return consumed, rest, 2 - } - - // The frame is consumed whole. - // NB: This cast cannot overflow because allowed is <= math.MaxInt32. - wr.stream.flow.take(int32(len(wd.p))) - return wr, empty, 1 -} - -// String is for debugging only. -func (wr FrameWriteRequest) String() string { - var des string - if s, ok := wr.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wr.write) - } - return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) -} - -// replyToWriter sends err to wr.done and panics if the send must block -// This does nothing if wr.done is nil. -func (wr *FrameWriteRequest) replyToWriter(err error) { - if wr.done == nil { - return - } - select { - case wr.done <- err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) - } - wr.write = nil // prevent use (assume it's tainted after wr.done send) -} - -// writeQueue is used by implementations of WriteScheduler. -type writeQueue struct { - s []FrameWriteRequest -} - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) -} - -func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] - return wr -} - -// consume consumes up to n bytes from q.s[0]. If the frame is -// entirely consumed, it is removed from the queue. If the frame -// is partially consumed, the frame is kept with the consumed -// bytes removed. Returns true iff any bytes were consumed. -func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { - return FrameWriteRequest{}, false - } - consumed, rest, numresult := q.s[0].Consume(n) - switch numresult { - case 0: - return FrameWriteRequest{}, false - case 1: - q.shift() - case 2: - q.s[0] = rest - } - return consumed, true -} - -type writeQueuePool []*writeQueue - -// put inserts an unused writeQueue into the pool. -func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} - } - q.s = q.s[:0] - *p = append(*p, q) -} - -// get returns an empty writeQueue. 
-func (p *writeQueuePool) get() *writeQueue { - ln := len(*p) - if ln == 0 { - return new(writeQueue) - } - x := ln - 1 - q := (*p)[x] - (*p)[x] = nil - *p = (*p)[:x] - return q -} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go deleted file mode 100644 index 848fed6e..00000000 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "fmt" - "math" - "sort" -) - -// RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 - -// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. -type PriorityWriteSchedulerConfig struct { - // MaxClosedNodesInTree controls the maximum number of closed streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // "It is possible for a stream to become closed while prioritization - // information ... is in transit. ... This potentially creates suboptimal - // prioritization, since the stream could be given a priority that is - // different from what is intended. To avoid these problems, an endpoint - // SHOULD retain stream prioritization state for a period after streams - // become closed. The longer state is retained, the lower the chance that - // streams are assigned incorrect or default priority values." - MaxClosedNodesInTree int - - // MaxIdleNodesInTree controls the maximum number of idle streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // Similarly, streams that are in the "idle" state can be assigned - // priority or become a parent of other streams. This allows for the - // creation of a grouping node in the dependency tree, which enables - // more flexible expressions of priority. Idle streams begin with a - // default priority (Section 5.3.5). - MaxIdleNodesInTree int - - // ThrottleOutOfOrderWrites enables write throttling to help ensure that - // data is delivered in priority order. This works around a race where - // stream B depends on stream A and both streams are about to call Write - // to queue DATA frames. If B wins the race, a naive scheduler would eagerly - // write as much data from B as possible, but this is suboptimal because A - // is a higher-priority stream. With throttling enabled, we write a small - // amount of data from B to minimize the amount of bandwidth that B can - // steal from A. - ThrottleOutOfOrderWrites bool -} - -// NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. -// If cfg is nil, default options are used. 
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { - if cfg == nil { - // For justification of these defaults, see: - // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY - cfg = &PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 10, - MaxIdleNodesInTree: 10, - ThrottleOutOfOrderWrites: false, - } - } - - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), - maxClosedNodesInTree: cfg.MaxClosedNodesInTree, - maxIdleNodesInTree: cfg.MaxIdleNodesInTree, - enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, - } - ws.nodes[0] = &ws.root - if cfg.ThrottleOutOfOrderWrites { - ws.writeThrottleLimit = 1024 - } else { - ws.writeThrottleLimit = math.MaxInt32 - } - return ws -} - -type priorityNodeState int - -const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle -) - -// priorityNode is a node in an HTTP/2 priority tree. -// Each node is associated with a single stream ID. -// See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree - - // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings -} - -func (n *priorityNode) setParent(parent *priorityNode) { - if n == parent { - panic("setParent to self") - } - if n.parent == parent { - return - } - // Unlink from current parent. - if parent := n.parent; parent != nil { - if n.prev == nil { - parent.kids = n.next - } else { - n.prev.next = n.next - } - if n.next != nil { - n.next.prev = n.prev - } - } - // Link to new parent. - // If parent=nil, remove n from the tree. - // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). - n.parent = parent - if parent == nil { - n.next = nil - n.prev = nil - } else { - n.next = parent.kids - n.prev = nil - if n.next != nil { - n.next.prev = n - } - parent.kids = n - } -} - -func (n *priorityNode) addBytes(b int64) { - n.bytes += b - for ; n != nil; n = n.parent { - n.subtreeBytes += b - } -} - -// walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this funcion returns true and the -// walk halts. tmp is used as scratch space for sorting. -// -// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true -// if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { - if !n.q.empty() && f(n, openParent) { - return true - } - if n.kids == nil { - return false - } - - // Don't consider the root "open" when updating openParent since - // we can't send data frames on the root stream (only control frames). - if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) - } - - // Common case: only one kid or all kids have the same weight. - // Some clients don't use weights; other clients (like web browsers) - // use mostly-linear priority trees. 
- w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. - *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." 
- parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for "grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if id := wr.StreamID(); id == 0 { - n = &ws.root - } else { - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. 
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/vendor/golang.org/x/net/http2/writesched_priority_test.go b/vendor/golang.org/x/net/http2/writesched_priority_test.go deleted file mode 100644 index f2b535a2..00000000 --- a/vendor/golang.org/x/net/http2/writesched_priority_test.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package http2 - -import ( - "bytes" - "fmt" - "sort" - "testing" -) - -func defaultPriorityWriteScheduler() *priorityWriteScheduler { - return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler) -} - -func checkPriorityWellFormed(ws *priorityWriteScheduler) error { - for id, n := range ws.nodes { - if id != n.id { - return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id) - } - if n.parent == nil { - if n.next != nil || n.prev != nil { - return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id) - } - continue - } - found := false - for k := n.parent.kids; k != nil; k = k.next { - if k.id == id { - found = true - break - } - } - if !found { - return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id) - } - } - return nil -} - -func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string { - var ids []int - for _, n := range ws.nodes { - ids = append(ids, int(n.id)) - } - sort.Ints(ids) - - var buf bytes.Buffer - for _, id := range ids { - if buf.Len() != 0 { - buf.WriteString(" ") - } - if id == 0 { - buf.WriteString(fmtNode(&ws.root)) - } else { - buf.WriteString(fmtNode(ws.nodes[uint32(id)])) - } - } - return buf.String() -} - -func fmtNodeParentSkipRoot(n *priorityNode) string { - switch { - case n.id == 0: - return "" - case n.parent == nil: - return fmt.Sprintf("%d{parent:nil}", n.id) - default: - return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id) - } -} - -func fmtNodeWeightParentSkipRoot(n *priorityNode) string { - switch { - case n.id == 0: - return "" - case n.parent == nil: - return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight) - default: - return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id) - } -} - -func TestPriorityTwoStreams(t *testing.T) { - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{}) - - want := "1{weight:15,parent:0} 2{weight:15,parent:0}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After open\ngot %q\nwant %q", got, want) - } - - // Move 1's parent to 2. - ws.AdjustStream(1, PriorityParam{ - StreamDep: 2, - Weight: 32, - Exclusive: false, - }) - want = "1{weight:32,parent:2} 2{weight:15,parent:0}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After adjust\ngot %q\nwant %q", got, want) - } - - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPriorityAdjustExclusiveZero(t *testing.T) { - // 1, 2, and 3 are all children of the 0 stream. - // Exclusive reprioritization to any of the streams should bring - // the rest of the streams under the reprioritized stream. 
- ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{}) - ws.OpenStream(3, OpenStreamOptions{}) - - want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After open\ngot %q\nwant %q", got, want) - } - - ws.AdjustStream(2, PriorityParam{ - StreamDep: 0, - Weight: 20, - Exclusive: true, - }) - want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After adjust\ngot %q\nwant %q", got, want) - } - - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPriorityAdjustOwnParent(t *testing.T) { - // Assigning a node as its own parent should have no effect. - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{}) - ws.AdjustStream(2, PriorityParam{ - StreamDep: 2, - Weight: 20, - Exclusive: true, - }) - want := "1{weight:15,parent:0} 2{weight:15,parent:0}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After adjust\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPriorityClosedStreams(t *testing.T) { - ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler) - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) - ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) - - // Close the first three streams. We lose 1, but keep 2 and 3. - ws.CloseStream(1) - ws.CloseStream(2) - ws.CloseStream(3) - - want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After close\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } - - // Adding a stream as an exclusive child of 1 gives it default - // priorities, since 1 is gone. - ws.OpenStream(5, OpenStreamOptions{}) - ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true}) - - // Adding a stream as an exclusive child of 2 should work, since 2 is not gone. - ws.OpenStream(6, OpenStreamOptions{}) - ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true}) - - want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After add streams\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPriorityClosedStreamsDisabled(t *testing.T) { - ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) - - // Close the first two streams. We keep only 3. 
- ws.CloseStream(1) - ws.CloseStream(2) - - want := "3{weight:15,parent:0}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After close\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPriorityIdleStreams(t *testing.T) { - ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler) - ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle - ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle - ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle - ws.OpenStream(4, OpenStreamOptions{}) - ws.OpenStream(5, OpenStreamOptions{}) - ws.OpenStream(6, OpenStreamOptions{}) - ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15}) - ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15}) - ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15}) - - want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After open\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPriorityIdleStreamsDisabled(t *testing.T) { - ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) - ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle - ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle - ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle - ws.OpenStream(4, OpenStreamOptions{}) - - want := "4{weight:15,parent:0}" - if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { - t.Errorf("After open\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPrioritySection531NonExclusive(t *testing.T) { - // Example from RFC 7540 Section 5.3.1. - // A,B,C,D = 1,2,3,4 - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(4, OpenStreamOptions{}) - ws.AdjustStream(4, PriorityParam{ - StreamDep: 1, - Weight: 15, - Exclusive: false, - }) - want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}" - if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { - t.Errorf("After adjust\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPrioritySection531Exclusive(t *testing.T) { - // Example from RFC 7540 Section 5.3.1. - // A,B,C,D = 1,2,3,4 - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(4, OpenStreamOptions{}) - ws.AdjustStream(4, PriorityParam{ - StreamDep: 1, - Weight: 15, - Exclusive: true, - }) - want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}" - if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { - t.Errorf("After adjust\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func makeSection533Tree() *priorityWriteScheduler { - // Initial tree from RFC 7540 Section 5.3.3. 
- // A,B,C,D,E,F = 1,2,3,4,5,6 - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) - ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) - ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) - return ws -} - -func TestPrioritySection533NonExclusive(t *testing.T) { - // Example from RFC 7540 Section 5.3.3. - // A,B,C,D,E,F = 1,2,3,4,5,6 - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) - ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) - ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) - ws.AdjustStream(1, PriorityParam{ - StreamDep: 4, - Weight: 15, - Exclusive: false, - }) - want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}" - if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { - t.Errorf("After adjust\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func TestPrioritySection533Exclusive(t *testing.T) { - // Example from RFC 7540 Section 5.3.3. - // A,B,C,D,E,F = 1,2,3,4,5,6 - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) - ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) - ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) - ws.AdjustStream(1, PriorityParam{ - StreamDep: 4, - Weight: 15, - Exclusive: true, - }) - want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}" - if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { - t.Errorf("After adjust\ngot %q\nwant %q", got, want) - } - if err := checkPriorityWellFormed(ws); err != nil { - t.Error(err) - } -} - -func checkPopAll(ws WriteScheduler, order []uint32) error { - for k, id := range order { - wr, ok := ws.Pop() - if !ok { - return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order) - } - if got := wr.StreamID(); got != id { - return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order) - } - } - wr, ok := ws.Pop() - if ok { - return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order) - } - return nil -} - -func TestPriorityPopFrom533Tree(t *testing.T) { - ws := makeSection533Tree() - - ws.Push(makeWriteHeadersRequest(3 /*C*/)) - ws.Push(makeWriteNonStreamRequest()) - ws.Push(makeWriteHeadersRequest(5 /*E*/)) - ws.Push(makeWriteHeadersRequest(1 /*A*/)) - t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) - - if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil { - t.Error(err) - } -} - -func TestPriorityPopFromLinearTree(t *testing.T) { - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) - ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) - - ws.Push(makeWriteHeadersRequest(3)) - ws.Push(makeWriteHeadersRequest(4)) - ws.Push(makeWriteHeadersRequest(1)) - ws.Push(makeWriteHeadersRequest(2)) - ws.Push(makeWriteNonStreamRequest()) - ws.Push(makeWriteNonStreamRequest()) - t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) - - if err := checkPopAll(ws, 
[]uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil { - t.Error(err) - } -} - -func TestPriorityFlowControl(t *testing.T) { - ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false}) - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - - sc := &serverConn{maxFrameSize: 16} - st1 := &stream{id: 1, sc: sc} - st2 := &stream{id: 2, sc: sc} - - ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil}) - ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil}) - ws.AdjustStream(2, PriorityParam{StreamDep: 1}) - - // No flow-control bytes available. - if wr, ok := ws.Pop(); ok { - t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr) - } - - // Add enough flow-control bytes to write st2 in two Pop calls. - // Should write data from st2 even though it's lower priority than st1. - for i := 1; i <= 2; i++ { - st2.flow.add(8) - wr, ok := ws.Pop() - if !ok { - t.Fatalf("Pop(%d)=false, want true", i) - } - if got, want := wr.DataSize(), 8; got != want { - t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want) - } - } -} - -func TestPriorityThrottleOutOfOrderWrites(t *testing.T) { - ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true}) - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) - - sc := &serverConn{maxFrameSize: 4096} - st1 := &stream{id: 1, sc: sc} - st2 := &stream{id: 2, sc: sc} - st1.flow.add(4096) - st2.flow.add(4096) - ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil}) - ws.AdjustStream(2, PriorityParam{StreamDep: 1}) - - // We have enough flow-control bytes to write st2 in a single Pop call. - // However, due to out-of-order write throttling, the first call should - // only write 1KB. - wr, ok := ws.Pop() - if !ok { - t.Fatalf("Pop(st2.first)=false, want true") - } - if got, want := wr.StreamID(), uint32(2); got != want { - t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want) - } - if got, want := wr.DataSize(), 1024; got != want { - t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want) - } - - // Now add data on st1. This should take precedence. - ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil}) - wr, ok = ws.Pop() - if !ok { - t.Fatalf("Pop(st1)=false, want true") - } - if got, want := wr.StreamID(), uint32(1); got != want { - t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want) - } - if got, want := wr.DataSize(), 4096; got != want { - t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want) - } - - // Should go back to writing 1KB from st2. 
- wr, ok = ws.Pop() - if !ok { - t.Fatalf("Pop(st2.last)=false, want true") - } - if got, want := wr.StreamID(), uint32(2); got != want { - t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want) - } - if got, want := wr.DataSize(), 1024; got != want { - t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want) - } -} - -func TestPriorityWeights(t *testing.T) { - ws := defaultPriorityWriteScheduler() - ws.OpenStream(1, OpenStreamOptions{}) - ws.OpenStream(2, OpenStreamOptions{}) - - sc := &serverConn{maxFrameSize: 8} - st1 := &stream{id: 1, sc: sc} - st2 := &stream{id: 2, sc: sc} - st1.flow.add(40) - st2.flow.add(40) - - ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil}) - ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil}) - ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34}) - ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9}) - - // st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)). - // The maximum frame size is 8 bytes. The write sequence should be: - // st1, total bytes so far is (st1=8, st=0) - // st2, total bytes so far is (st1=8, st=8) - // st1, total bytes so far is (st1=16, st=8) - // st1, total bytes so far is (st1=24, st=8) // 3x bandwidth - // st1, total bytes so far is (st1=32, st=8) // 4x bandwidth - // st2, total bytes so far is (st1=32, st=16) // 2x bandwidth - // st1, total bytes so far is (st1=40, st=16) - // st2, total bytes so far is (st1=40, st=24) - // st2, total bytes so far is (st1=40, st=32) - // st2, total bytes so far is (st1=40, st=40) - if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil { - t.Error(err) - } -} - -func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) { - ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 0, - MaxIdleNodesInTree: 0, - }) - ws.OpenStream(1, OpenStreamOptions{}) - ws.CloseStream(1) - ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)}) - ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)}) - - if err := checkPopAll(ws, []uint32{1, 2}); err != nil { - t.Error(err) - } -} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go deleted file mode 100644 index 36d7919f..00000000 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "math" - -// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 -// priorities. Control frames like SETTINGS and PING are written before DATA -// frames, but if no control frames are queued and multiple streams have queued -// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. -func NewRandomWriteScheduler() WriteScheduler { - return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} -} - -type randomWriteScheduler struct { - // zero are frames not associated with a specific stream. - zero writeQueue - - // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. - sq map[uint32]*writeQueue - - // pool of empty queues for reuse. 
- queuePool writeQueuePool -} - -func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // no-op: idle streams are not tracked -} - -func (ws *randomWriteScheduler) CloseStream(streamID uint32) { - q, ok := ws.sq[streamID] - if !ok { - return - } - delete(ws.sq, streamID) - ws.queuePool.put(q) -} - -func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - // no-op: priorities are ignored -} - -func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { - ws.zero.push(wr) - return - } - q, ok := ws.sq[id] - if !ok { - q = ws.queuePool.get() - ws.sq[id] = q - } - q.push(wr) -} - -func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - // Iterate over all non-idle streams until finding one that can be consumed. - for _, q := range ws.sq { - if wr, ok := q.consume(math.MaxInt32); ok { - return wr, true - } - } - return FrameWriteRequest{}, false -} diff --git a/vendor/golang.org/x/net/http2/writesched_random_test.go b/vendor/golang.org/x/net/http2/writesched_random_test.go deleted file mode 100644 index 3bf4aa36..00000000 --- a/vendor/golang.org/x/net/http2/writesched_random_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "testing" - -func TestRandomScheduler(t *testing.T) { - ws := NewRandomWriteScheduler() - ws.Push(makeWriteHeadersRequest(3)) - ws.Push(makeWriteHeadersRequest(4)) - ws.Push(makeWriteHeadersRequest(1)) - ws.Push(makeWriteHeadersRequest(2)) - ws.Push(makeWriteNonStreamRequest()) - ws.Push(makeWriteNonStreamRequest()) - - // Pop all frames. Should get the non-stream requests first, - // followed by the stream requests in any order. - var order []FrameWriteRequest - for { - wr, ok := ws.Pop() - if !ok { - break - } - order = append(order, wr) - } - t.Logf("got frames: %v", order) - if len(order) != 6 { - t.Fatalf("got %d frames, expected 6", len(order)) - } - if order[0].StreamID() != 0 || order[1].StreamID() != 0 { - t.Fatal("expected non-stream frames first", order[0], order[1]) - } - got := make(map[uint32]bool) - for _, wr := range order[2:] { - got[wr.StreamID()] = true - } - for id := uint32(1); id <= 4; id++ { - if !got[id] { - t.Errorf("frame not found for stream %d", id) - } - } -} diff --git a/vendor/golang.org/x/net/http2/writesched_test.go b/vendor/golang.org/x/net/http2/writesched_test.go deleted file mode 100644 index 0807056b..00000000 --- a/vendor/golang.org/x/net/http2/writesched_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package http2 - -import ( - "fmt" - "math" - "reflect" - "testing" -) - -func makeWriteNonStreamRequest() FrameWriteRequest { - return FrameWriteRequest{writeSettingsAck{}, nil, nil} -} - -func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest { - st := &stream{id: streamID} - return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil} -} - -func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error { - consumed, rest, n := wr.Consume(nbytes) - var wantConsumed, wantRest FrameWriteRequest - switch len(want) { - case 0: - case 1: - wantConsumed = want[0] - case 2: - wantConsumed = want[0] - wantRest = want[1] - } - if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) { - return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want)) - } - return nil -} - -func TestFrameWriteRequestNonData(t *testing.T) { - wr := makeWriteNonStreamRequest() - if got, want := wr.DataSize(), 0; got != want { - t.Errorf("DataSize: got %v, want %v", got, want) - } - - // Non-DATA frames are always consumed whole. - if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil { - t.Errorf("Consume:\n%v", err) - } -} - -func TestFrameWriteRequestData(t *testing.T) { - st := &stream{ - id: 1, - sc: &serverConn{maxFrameSize: 16}, - } - const size = 32 - wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)} - if got, want := wr.DataSize(), size; got != want { - t.Errorf("DataSize: got %v, want %v", got, want) - } - - // No flow-control bytes available: cannot consume anything. - if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil { - t.Errorf("Consume(limited by flow control):\n%v", err) - } - - // Add enough flow-control bytes to consume the entire frame, - // but we're now restricted by st.sc.maxFrameSize. - st.flow.add(size) - want := []FrameWriteRequest{ - { - write: &writeData{st.id, make([]byte, st.sc.maxFrameSize), false}, - stream: st, - done: nil, - }, - { - write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true}, - stream: st, - done: wr.done, - }, - } - if err := checkConsume(wr, math.MaxInt32, want); err != nil { - t.Errorf("Consume(limited by maxFrameSize):\n%v", err) - } - rest := want[1] - - // Consume 8 bytes from the remaining frame. - want = []FrameWriteRequest{ - { - write: &writeData{st.id, make([]byte, 8), false}, - stream: st, - done: nil, - }, - { - write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, - stream: st, - done: wr.done, - }, - } - if err := checkConsume(rest, 8, want); err != nil { - t.Errorf("Consume(8):\n%v", err) - } - rest = want[1] - - // Consume all remaining bytes. 
- want = []FrameWriteRequest{ - { - write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, - stream: st, - done: wr.done, - }, - } - if err := checkConsume(rest, math.MaxInt32, want); err != nil { - t.Errorf("Consume(remainder):\n%v", err) - } -} - -func TestFrameWriteRequest_StreamID(t *testing.T) { - const streamID = 123 - wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)} - if got := wr.StreamID(); got != streamID { - t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID) - } -} diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go deleted file mode 100644 index 610b2cdb..00000000 --- a/vendor/golang.org/x/net/http2/z_spec_test.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "encoding/xml" - "flag" - "fmt" - "io" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "testing" -) - -var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests") - -// The global map of sentence coverage for the http2 spec. -var defaultSpecCoverage specCoverage - -var loadSpecOnce sync.Once - -func loadSpec() { - if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil { - panic(err) - } else { - defaultSpecCoverage = readSpecCov(f) - f.Close() - } -} - -// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not -// "covered" will be included in report outputted by TestSpecCoverage. -func covers(sec, sentences string) { - loadSpecOnce.Do(loadSpec) - defaultSpecCoverage.cover(sec, sentences) -} - -type specPart struct { - section string - sentence string -} - -func (ss specPart) Less(oo specPart) bool { - atoi := func(s string) int { - n, err := strconv.Atoi(s) - if err != nil { - panic(err) - } - return n - } - a := strings.Split(ss.section, ".") - b := strings.Split(oo.section, ".") - for len(a) > 0 { - if len(b) == 0 { - return false - } - x, y := atoi(a[0]), atoi(b[0]) - if x == y { - a, b = a[1:], b[1:] - continue - } - return x < y - } - if len(b) > 0 { - return true - } - return false -} - -type bySpecSection []specPart - -func (a bySpecSection) Len() int { return len(a) } -func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) } -func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type specCoverage struct { - coverage map[specPart]bool - d *xml.Decoder -} - -func joinSection(sec []int) string { - s := fmt.Sprintf("%d", sec[0]) - for _, n := range sec[1:] { - s = fmt.Sprintf("%s.%d", s, n) - } - return s -} - -func (sc specCoverage) readSection(sec []int) { - var ( - buf = new(bytes.Buffer) - sub = 0 - ) - for { - tk, err := sc.d.Token() - if err != nil { - if err == io.EOF { - return - } - panic(err) - } - switch v := tk.(type) { - case xml.StartElement: - if skipElement(v) { - if err := sc.d.Skip(); err != nil { - panic(err) - } - if v.Name.Local == "section" { - sub++ - } - break - } - switch v.Name.Local { - case "section": - sub++ - sc.readSection(append(sec, sub)) - case "xref": - buf.Write(sc.readXRef(v)) - } - case xml.CharData: - if len(sec) == 0 { - break - } - buf.Write(v) - case xml.EndElement: - if v.Name.Local == "section" { - sc.addSentences(joinSection(sec), buf.String()) - return - } - } - } -} - -func (sc specCoverage) readXRef(se xml.StartElement) []byte { - var b []byte - for { - tk, err := 
sc.d.Token() - if err != nil { - panic(err) - } - switch v := tk.(type) { - case xml.CharData: - if b != nil { - panic("unexpected CharData") - } - b = []byte(string(v)) - case xml.EndElement: - if v.Name.Local != "xref" { - panic("expected ") - } - if b != nil { - return b - } - sig := attrSig(se) - switch sig { - case "target": - return []byte(fmt.Sprintf("[%s]", attrValue(se, "target"))) - case "fmt-of,rel,target", "fmt-,,rel,target": - return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel"))) - case "fmt-of,sec,target", "fmt-,,sec,target": - return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target"))) - case "fmt-of,rel,sec,target": - return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel"))) - default: - panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se))) - } - default: - panic(fmt.Sprintf("unexpected tag %q", v)) - } - } -} - -var skipAnchor = map[string]bool{ - "intro": true, - "Overview": true, -} - -var skipTitle = map[string]bool{ - "Acknowledgements": true, - "Change Log": true, - "Document Organization": true, - "Conventions and Terminology": true, -} - -func skipElement(s xml.StartElement) bool { - switch s.Name.Local { - case "artwork": - return true - case "section": - for _, attr := range s.Attr { - switch attr.Name.Local { - case "anchor": - if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") { - return true - } - case "title": - if skipTitle[attr.Value] { - return true - } - } - } - } - return false -} - -func readSpecCov(r io.Reader) specCoverage { - sc := specCoverage{ - coverage: map[specPart]bool{}, - d: xml.NewDecoder(r)} - sc.readSection(nil) - return sc -} - -func (sc specCoverage) addSentences(sec string, sentence string) { - for _, s := range parseSentences(sentence) { - sc.coverage[specPart{sec, s}] = false - } -} - -func (sc specCoverage) cover(sec string, sentence string) { - for _, s := range parseSentences(sentence) { - p := specPart{sec, s} - if _, ok := sc.coverage[p]; !ok { - panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s)) - } - sc.coverage[specPart{sec, s}] = true - } - -} - -var whitespaceRx = regexp.MustCompile(`\s+`) - -func parseSentences(sens string) []string { - sens = strings.TrimSpace(sens) - if sens == "" { - return nil - } - ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ") - for i, s := range ss { - s = strings.TrimSpace(s) - if !strings.HasSuffix(s, ".") { - s += "." - } - ss[i] = s - } - return ss -} - -func TestSpecParseSentences(t *testing.T) { - tests := []struct { - ss string - want []string - }{ - {"Sentence 1. Sentence 2.", - []string{ - "Sentence 1.", - "Sentence 2.", - }}, - {"Sentence 1. 
\nSentence 2.\tSentence 3.", - []string{ - "Sentence 1.", - "Sentence 2.", - "Sentence 3.", - }}, - } - - for i, tt := range tests { - got := parseSentences(tt.ss) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%d: got = %q, want %q", i, got, tt.want) - } - } -} - -func TestSpecCoverage(t *testing.T) { - if !*coverSpec { - t.Skip() - } - - loadSpecOnce.Do(loadSpec) - - var ( - list []specPart - cv = defaultSpecCoverage.coverage - total = len(cv) - complete = 0 - ) - - for sp, touched := range defaultSpecCoverage.coverage { - if touched { - complete++ - } else { - list = append(list, sp) - } - } - sort.Stable(bySpecSection(list)) - - if testing.Short() && len(list) > 5 { - list = list[:5] - } - - for _, p := range list { - t.Errorf("\tSECTION %s: %s", p.section, p.sentence) - } - - t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100) -} - -func attrSig(se xml.StartElement) string { - var names []string - for _, attr := range se.Attr { - if attr.Name.Local == "fmt" { - names = append(names, "fmt-"+attr.Value) - } else { - names = append(names, attr.Name.Local) - } - } - sort.Strings(names) - return strings.Join(names, ",") -} - -func attrValue(se xml.StartElement, attr string) string { - for _, a := range se.Attr { - if a.Name.Local == attr { - return a.Value - } - } - panic("unknown attribute " + attr) -} - -func TestSpecPartLess(t *testing.T) { - tests := []struct { - sec1, sec2 string - want bool - }{ - {"6.2.1", "6.2", false}, - {"6.2", "6.2.1", true}, - {"6.10", "6.10.1", true}, - {"6.10", "6.1.1", false}, // 10, not 1 - {"6.1", "6.1", false}, // equal, so not less - } - for _, tt := range tests { - got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"}) - if got != tt.want { - t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want) - } - } -} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go deleted file mode 100644 index 75db991d..00000000 --- a/vendor/golang.org/x/net/icmp/dstunreach.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -// A DstUnreach represents an ICMP destination unreachable message -// body. -type DstUnreach struct { - Data []byte // data, known as original datagram field - Extensions []Extension // extensions -} - -// Len implements the Len method of MessageBody interface. -func (p *DstUnreach) Len(proto int) int { - if p == nil { - return 0 - } - l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) - return 4 + l -} - -// Marshal implements the Marshal method of MessageBody interface. -func (p *DstUnreach) Marshal(proto int) ([]byte, error) { - return marshalMultipartMessageBody(proto, p.Data, p.Extensions) -} - -// parseDstUnreach parses b as an ICMP destination unreachable message -// body. -func parseDstUnreach(proto int, b []byte) (MessageBody, error) { - if len(b) < 4 { - return nil, errMessageTooShort - } - p := &DstUnreach{} - var err error - p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) - if err != nil { - return nil, err - } - return p, nil -} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go deleted file mode 100644 index e6f15efd..00000000 --- a/vendor/golang.org/x/net/icmp/echo.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import "encoding/binary" - -// An Echo represents an ICMP echo request or reply message body. -type Echo struct { - ID int // identifier - Seq int // sequence number - Data []byte // data -} - -// Len implements the Len method of MessageBody interface. -func (p *Echo) Len(proto int) int { - if p == nil { - return 0 - } - return 4 + len(p.Data) -} - -// Marshal implements the Marshal method of MessageBody interface. -func (p *Echo) Marshal(proto int) ([]byte, error) { - b := make([]byte, 4+len(p.Data)) - binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) - binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) - copy(b[4:], p.Data) - return b, nil -} - -// parseEcho parses b as an ICMP echo request or reply message body. -func parseEcho(proto int, b []byte) (MessageBody, error) { - bodyLen := len(b) - if bodyLen < 4 { - return nil, errMessageTooShort - } - p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} - if bodyLen > 4 { - p.Data = make([]byte, bodyLen-4) - copy(p.Data, b[4:]) - } - return p, nil -} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go deleted file mode 100644 index a68bfb01..00000000 --- a/vendor/golang.org/x/net/icmp/endpoint.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import ( - "net" - "runtime" - "syscall" - "time" - - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -var _ net.PacketConn = &PacketConn{} - -// A PacketConn represents a packet network endpoint that uses either -// ICMPv4 or ICMPv6. -type PacketConn struct { - c net.PacketConn - p4 *ipv4.PacketConn - p6 *ipv6.PacketConn -} - -func (c *PacketConn) ok() bool { return c != nil && c.c != nil } - -// IPv4PacketConn returns the ipv4.PacketConn of c. -// It returns nil when c is not created as the endpoint for ICMPv4. -func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { - if !c.ok() { - return nil - } - return c.p4 -} - -// IPv6PacketConn returns the ipv6.PacketConn of c. -// It returns nil when c is not created as the endpoint for ICMPv6. -func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { - if !c.ok() { - return nil - } - return c.p6 -} - -// ReadFrom reads an ICMP message from the connection. -func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { - if !c.ok() { - return 0, nil, syscall.EINVAL - } - // Please be informed that ipv4.NewPacketConn enables - // IP_STRIPHDR option by default on Darwin. - // See golang.org/issue/9395 for further information. - if runtime.GOOS == "darwin" && c.p4 != nil { - n, _, peer, err := c.p4.ReadFrom(b) - return n, peer, err - } - return c.c.ReadFrom(b) -} - -// WriteTo writes the ICMP message b to dst. -// Dst must be net.UDPAddr when c is a non-privileged -// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr. -func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { - if !c.ok() { - return 0, syscall.EINVAL - } - return c.c.WriteTo(b, dst) -} - -// Close closes the endpoint. -func (c *PacketConn) Close() error { - if !c.ok() { - return syscall.EINVAL - } - return c.c.Close() -} - -// LocalAddr returns the local network address. 
-func (c *PacketConn) LocalAddr() net.Addr { - if !c.ok() { - return nil - } - return c.c.LocalAddr() -} - -// SetDeadline sets the read and write deadlines associated with the -// endpoint. -func (c *PacketConn) SetDeadline(t time.Time) error { - if !c.ok() { - return syscall.EINVAL - } - return c.c.SetDeadline(t) -} - -// SetReadDeadline sets the read deadline associated with the -// endpoint. -func (c *PacketConn) SetReadDeadline(t time.Time) error { - if !c.ok() { - return syscall.EINVAL - } - return c.c.SetReadDeadline(t) -} - -// SetWriteDeadline sets the write deadline associated with the -// endpoint. -func (c *PacketConn) SetWriteDeadline(t time.Time) error { - if !c.ok() { - return syscall.EINVAL - } - return c.c.SetWriteDeadline(t) -} diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go deleted file mode 100644 index 1df4cecc..00000000 --- a/vendor/golang.org/x/net/icmp/example_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp_test - -import ( - "log" - "net" - "os" - "runtime" - - "golang.org/x/net/icmp" - "golang.org/x/net/ipv6" -) - -func ExamplePacketConn_nonPrivilegedPing() { - switch runtime.GOOS { - case "darwin": - case "linux": - log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") - default: - log.Println("not supported on", runtime.GOOS) - return - } - - c, err := icmp.ListenPacket("udp6", "fe80::1%en0") - if err != nil { - log.Fatal(err) - } - defer c.Close() - - wm := icmp.Message{ - Type: ipv6.ICMPTypeEchoRequest, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: 1, - Data: []byte("HELLO-R-U-THERE"), - }, - } - wb, err := wm.Marshal(nil) - if err != nil { - log.Fatal(err) - } - if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil { - log.Fatal(err) - } - - rb := make([]byte, 1500) - n, peer, err := c.ReadFrom(rb) - if err != nil { - log.Fatal(err) - } - rm, err := icmp.ParseMessage(58, rb[:n]) - if err != nil { - log.Fatal(err) - } - switch rm.Type { - case ipv6.ICMPTypeEchoReply: - log.Printf("got reflection from %v", peer) - default: - log.Printf("got %+v; want echo reply", rm) - } -} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go deleted file mode 100644 index 402a7514..00000000 --- a/vendor/golang.org/x/net/icmp/extension.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import "encoding/binary" - -// An Extension represents an ICMP extension. -type Extension interface { - // Len returns the length of ICMP extension. - // Proto must be either the ICMPv4 or ICMPv6 protocol number. - Len(proto int) int - - // Marshal returns the binary encoding of ICMP extension. - // Proto must be either the ICMPv4 or ICMPv6 protocol number. - Marshal(proto int) ([]byte, error) -} - -const extensionVersion = 2 - -func validExtensionHeader(b []byte) bool { - v := int(b[0]&0xf0) >> 4 - s := binary.BigEndian.Uint16(b[2:4]) - if s != 0 { - s = checksum(b) - } - if v != extensionVersion || s != 0 { - return false - } - return true -} - -// parseExtensions parses b as a list of ICMP extensions. 
-// The length attribute l must be the length attribute field in -// received icmp messages. -// -// It will return a list of ICMP extensions and an adjusted length -// attribute that represents the length of the padded original -// datagram field. Otherwise, it returns an error. -func parseExtensions(b []byte, l int) ([]Extension, int, error) { - // Still a lot of non-RFC 4884 compliant implementations are - // out there. Set the length attribute l to 128 when it looks - // inappropriate for backwards compatibility. - // - // A minimal extension at least requires 8 octets; 4 octets - // for an extension header, and 4 octets for a single object - // header. - // - // See RFC 4884 for further information. - if 128 > l || l+8 > len(b) { - l = 128 - } - if l+8 > len(b) { - return nil, -1, errNoExtension - } - if !validExtensionHeader(b[l:]) { - if l == 128 { - return nil, -1, errNoExtension - } - l = 128 - if !validExtensionHeader(b[l:]) { - return nil, -1, errNoExtension - } - } - var exts []Extension - for b = b[l+4:]; len(b) >= 4; { - ol := int(binary.BigEndian.Uint16(b[:2])) - if 4 > ol || ol > len(b) { - break - } - switch b[2] { - case classMPLSLabelStack: - ext, err := parseMPLSLabelStack(b[:ol]) - if err != nil { - return nil, -1, err - } - exts = append(exts, ext) - case classInterfaceInfo: - ext, err := parseInterfaceInfo(b[:ol]) - if err != nil { - return nil, -1, err - } - exts = append(exts, ext) - } - b = b[ol:] - } - return exts, l, nil -} diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go deleted file mode 100644 index 0b3f7b9e..00000000 --- a/vendor/golang.org/x/net/icmp/extension_test.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package icmp - -import ( - "net" - "reflect" - "testing" - - "golang.org/x/net/internal/iana" -) - -var marshalAndParseExtensionTests = []struct { - proto int - hdr []byte - obj []byte - exts []Extension -}{ - // MPLS label stack with no label - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x04, 0x01, 0x01, - }, - exts: []Extension{ - &MPLSLabelStack{ - Class: classMPLSLabelStack, - Type: typeIncomingMPLSLabelStack, - }, - }, - }, - // MPLS label stack with a single label - { - proto: iana.ProtocolIPv6ICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x08, 0x01, 0x01, - 0x03, 0xe8, 0xe9, 0xff, - }, - exts: []Extension{ - &MPLSLabelStack{ - Class: classMPLSLabelStack, - Type: typeIncomingMPLSLabelStack, - Labels: []MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - }, - }, - // MPLS label stack with multiple labels - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x0c, 0x01, 0x01, - 0x03, 0xe8, 0xde, 0xfe, - 0x03, 0xe8, 0xe1, 0xff, - }, - exts: []Extension{ - &MPLSLabelStack{ - Class: classMPLSLabelStack, - Type: typeIncomingMPLSLabelStack, - Labels: []MPLSLabel{ - { - Label: 16013, - TC: 0x7, - S: false, - TTL: 254, - }, - { - Label: 16014, - TC: 0, - S: true, - TTL: 255, - }, - }, - }, - }, - }, - // Interface information with no attribute - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x04, 0x02, 0x00, - }, - exts: []Extension{ - &InterfaceInfo{ - Class: classInterfaceInfo, - }, - }, - }, - // Interface information with ifIndex and name - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x10, 0x02, 0x0a, - 0x00, 0x00, 0x00, 0x10, - 0x08, byte('e'), byte('n'), byte('1'), - byte('0'), byte('1'), 0x00, 0x00, - }, - exts: []Extension{ - &InterfaceInfo{ - Class: classInterfaceInfo, - Type: 0x0a, - Interface: &net.Interface{ - Index: 16, - Name: "en101", - }, - }, - }, - }, - // Interface information with ifIndex, IPAddr, name and MTU - { - proto: iana.ProtocolIPv6ICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x28, 0x02, 0x0f, - 0x00, 0x00, 0x00, 0x0f, - 0x00, 0x02, 0x00, 0x00, - 0xfe, 0x80, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x08, byte('e'), byte('n'), byte('1'), - byte('0'), byte('1'), 0x00, 0x00, - 0x00, 0x00, 0x20, 0x00, - }, - exts: []Extension{ - &InterfaceInfo{ - Class: classInterfaceInfo, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en101", - }, - }, - }, - }, -} - -func TestMarshalAndParseExtension(t *testing.T) { - for i, tt := range marshalAndParseExtensionTests { - for j, ext := range tt.exts { - var err error - var b []byte - switch ext := ext.(type) { - case *MPLSLabelStack: - b, err = ext.Marshal(tt.proto) - if err != nil { - t.Errorf("#%v/%v: %v", i, j, err) - continue - } - case *InterfaceInfo: - b, err = ext.Marshal(tt.proto) - if err != nil { - t.Errorf("#%v/%v: %v", i, j, err) - continue - } - } - if !reflect.DeepEqual(b, tt.obj) { - t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj) - continue - } - } - - for j, wire := range []struct { - data []byte // original datagram - inlattr int // length of padded original datagram, a hint - outlattr int // length of padded original datagram, a want - err error - }{ - {nil, 0, 
-1, errNoExtension}, - {make([]byte, 127), 128, -1, errNoExtension}, - - {make([]byte, 128), 127, -1, errNoExtension}, - {make([]byte, 128), 128, -1, errNoExtension}, - {make([]byte, 128), 129, -1, errNoExtension}, - - {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil}, - {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil}, - {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil}, - - {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension}, - {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil}, - {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension}, - } { - exts, l, err := parseExtensions(wire.data, wire.inlattr) - if err != wire.err { - t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err) - continue - } - if wire.err != nil { - continue - } - if l != wire.outlattr { - t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr) - } - if !reflect.DeepEqual(exts, tt.exts) { - for j, ext := range exts { - switch ext := ext.(type) { - case *MPLSLabelStack: - want := tt.exts[j].(*MPLSLabelStack) - t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) - case *InterfaceInfo: - want := tt.exts[j].(*InterfaceInfo) - t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) - } - } - continue - } - } - } -} - -var parseInterfaceNameTests = []struct { - b []byte - error -}{ - {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, - {[]byte{4, 'e', 'n', '0'}, nil}, - {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, - {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, -} - -func TestParseInterfaceName(t *testing.T) { - ifi := InterfaceInfo{Interface: &net.Interface{}} - for i, tt := range parseInterfaceNameTests { - if _, err := ifi.parseName(tt.b); err != tt.error { - t.Errorf("#%d: got %v; want %v", i, err, tt.error) - } - } -} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go deleted file mode 100644 index 398fd388..00000000 --- a/vendor/golang.org/x/net/icmp/helper_posix.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows - -package icmp - -import ( - "net" - "strconv" - "syscall" -) - -func sockaddr(family int, address string) (syscall.Sockaddr, error) { - switch family { - case syscall.AF_INET: - a, err := net.ResolveIPAddr("ip4", address) - if err != nil { - return nil, err - } - if len(a.IP) == 0 { - a.IP = net.IPv4zero - } - if a.IP = a.IP.To4(); a.IP == nil { - return nil, net.InvalidAddrError("non-ipv4 address") - } - sa := &syscall.SockaddrInet4{} - copy(sa.Addr[:], a.IP) - return sa, nil - case syscall.AF_INET6: - a, err := net.ResolveIPAddr("ip6", address) - if err != nil { - return nil, err - } - if len(a.IP) == 0 { - a.IP = net.IPv6unspecified - } - if a.IP.Equal(net.IPv4zero) { - a.IP = net.IPv6unspecified - } - if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { - return nil, net.InvalidAddrError("non-ipv6 address") - } - sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} - copy(sa.Addr[:], a.IP) - return sa, nil - default: - return nil, net.InvalidAddrError("unexpected family") - } -} - -func zoneToUint32(zone string) uint32 { - if zone == "" { - return 0 - } - if ifi, err := net.InterfaceByName(zone); err == nil { - return uint32(ifi.Index) - } - n, err := strconv.Atoi(zone) - if err != nil { - return 0 - } - return uint32(n) -} - -func last(s string, b byte) int { - i := len(s) - for i--; i >= 0; i-- { - if s[i] == b { - break - } - } - return i -} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go deleted file mode 100644 index 78b5b98b..00000000 --- a/vendor/golang.org/x/net/icmp/interface.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import ( - "encoding/binary" - "net" - "strings" - - "golang.org/x/net/internal/iana" -) - -const ( - classInterfaceInfo = 2 - - afiIPv4 = 1 - afiIPv6 = 2 -) - -const ( - attrMTU = 1 << iota - attrName - attrIPAddr - attrIfIndex -) - -// An InterfaceInfo represents interface and next-hop identification. -type InterfaceInfo struct { - Class int // extension object class number - Type int // extension object sub-type - Interface *net.Interface - Addr *net.IPAddr -} - -func (ifi *InterfaceInfo) nameLen() int { - if len(ifi.Interface.Name) > 63 { - return 64 - } - l := 1 + len(ifi.Interface.Name) - return (l + 3) &^ 3 -} - -func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { - l = 4 - if ifi.Interface != nil && ifi.Interface.Index > 0 { - attrs |= attrIfIndex - l += 4 - if len(ifi.Interface.Name) > 0 { - attrs |= attrName - l += ifi.nameLen() - } - if ifi.Interface.MTU > 0 { - attrs |= attrMTU - l += 4 - } - } - if ifi.Addr != nil { - switch proto { - case iana.ProtocolICMP: - if ifi.Addr.IP.To4() != nil { - attrs |= attrIPAddr - l += 4 + net.IPv4len - } - case iana.ProtocolIPv6ICMP: - if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { - attrs |= attrIPAddr - l += 4 + net.IPv6len - } - } - } - return -} - -// Len implements the Len method of Extension interface. -func (ifi *InterfaceInfo) Len(proto int) int { - _, l := ifi.attrsAndLen(proto) - return l -} - -// Marshal implements the Marshal method of Extension interface. 
-func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { - attrs, l := ifi.attrsAndLen(proto) - b := make([]byte, l) - if err := ifi.marshal(proto, b, attrs, l); err != nil { - return nil, err - } - return b, nil -} - -func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { - binary.BigEndian.PutUint16(b[:2], uint16(l)) - b[2], b[3] = classInterfaceInfo, byte(ifi.Type) - for b = b[4:]; len(b) > 0 && attrs != 0; { - switch { - case attrs&attrIfIndex != 0: - b = ifi.marshalIfIndex(proto, b) - attrs &^= attrIfIndex - case attrs&attrIPAddr != 0: - b = ifi.marshalIPAddr(proto, b) - attrs &^= attrIPAddr - case attrs&attrName != 0: - b = ifi.marshalName(proto, b) - attrs &^= attrName - case attrs&attrMTU != 0: - b = ifi.marshalMTU(proto, b) - attrs &^= attrMTU - } - } - return nil -} - -func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { - binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) - return b[4:] -} - -func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { - if len(b) < 4 { - return nil, errMessageTooShort - } - ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) - return b[4:], nil -} - -func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { - switch proto { - case iana.ProtocolICMP: - binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) - copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) - b = b[4+net.IPv4len:] - case iana.ProtocolIPv6ICMP: - binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) - copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) - b = b[4+net.IPv6len:] - } - return b -} - -func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { - if len(b) < 4 { - return nil, errMessageTooShort - } - afi := int(binary.BigEndian.Uint16(b[:2])) - b = b[4:] - switch afi { - case afiIPv4: - if len(b) < net.IPv4len { - return nil, errMessageTooShort - } - ifi.Addr.IP = make(net.IP, net.IPv4len) - copy(ifi.Addr.IP, b[:net.IPv4len]) - b = b[net.IPv4len:] - case afiIPv6: - if len(b) < net.IPv6len { - return nil, errMessageTooShort - } - ifi.Addr.IP = make(net.IP, net.IPv6len) - copy(ifi.Addr.IP, b[:net.IPv6len]) - b = b[net.IPv6len:] - } - return b, nil -} - -func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { - l := byte(ifi.nameLen()) - b[0] = l - copy(b[1:], []byte(ifi.Interface.Name)) - return b[l:] -} - -func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { - if 4 > len(b) || len(b) < int(b[0]) { - return nil, errMessageTooShort - } - l := int(b[0]) - if l%4 != 0 || 4 > l || l > 64 { - return nil, errInvalidExtension - } - var name [63]byte - copy(name[:], b[1:l]) - ifi.Interface.Name = strings.Trim(string(name[:]), "\000") - return b[l:], nil -} - -func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { - binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) - return b[4:] -} - -func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { - if len(b) < 4 { - return nil, errMessageTooShort - } - ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) - return b[4:], nil -} - -func parseInterfaceInfo(b []byte) (Extension, error) { - ifi := &InterfaceInfo{ - Class: int(b[2]), - Type: int(b[3]), - } - if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { - ifi.Interface = &net.Interface{} - } - if ifi.Type&attrIPAddr != 0 { - ifi.Addr = &net.IPAddr{} - } - attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) - for b = b[4:]; len(b) > 0 && attrs != 0; { - var err error - switch { - case attrs&attrIfIndex != 0: - b, err = ifi.parseIfIndex(b) - attrs &^= 
attrIfIndex - case attrs&attrIPAddr != 0: - b, err = ifi.parseIPAddr(b) - attrs &^= attrIPAddr - case attrs&attrName != 0: - b, err = ifi.parseName(b) - attrs &^= attrName - case attrs&attrMTU != 0: - b, err = ifi.parseMTU(b) - attrs &^= attrMTU - } - if err != nil { - return nil, err - } - } - if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { - ifi.Addr.Zone = ifi.Interface.Name - } - return ifi, nil -} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go deleted file mode 100644 index ffc66ed4..00000000 --- a/vendor/golang.org/x/net/icmp/ipv4.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import ( - "encoding/binary" - "net" - "runtime" - - "golang.org/x/net/internal/socket" - "golang.org/x/net/ipv4" -) - -// freebsdVersion is set in sys_freebsd.go. -// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. -var freebsdVersion uint32 - -// ParseIPv4Header parses b as an IPv4 header of ICMP error message -// invoking packet, which is contained in ICMP error message. -func ParseIPv4Header(b []byte) (*ipv4.Header, error) { - if len(b) < ipv4.HeaderLen { - return nil, errHeaderTooShort - } - hdrlen := int(b[0]&0x0f) << 2 - if hdrlen > len(b) { - return nil, errBufferTooShort - } - h := &ipv4.Header{ - Version: int(b[0] >> 4), - Len: hdrlen, - TOS: int(b[1]), - ID: int(binary.BigEndian.Uint16(b[4:6])), - FragOff: int(binary.BigEndian.Uint16(b[6:8])), - TTL: int(b[8]), - Protocol: int(b[9]), - Checksum: int(binary.BigEndian.Uint16(b[10:12])), - Src: net.IPv4(b[12], b[13], b[14], b[15]), - Dst: net.IPv4(b[16], b[17], b[18], b[19]), - } - switch runtime.GOOS { - case "darwin": - h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) - case "freebsd": - if freebsdVersion >= 1000000 { - h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) - } else { - h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) - } - default: - h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) - } - h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 - h.FragOff = h.FragOff & 0x1fff - if hdrlen-ipv4.HeaderLen > 0 { - h.Options = make([]byte, hdrlen-ipv4.HeaderLen) - copy(h.Options, b[ipv4.HeaderLen:]) - } - return h, nil -} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go deleted file mode 100644 index 058953f4..00000000 --- a/vendor/golang.org/x/net/icmp/ipv4_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import ( - "encoding/binary" - "net" - "reflect" - "runtime" - "testing" - - "golang.org/x/net/internal/socket" - "golang.org/x/net/ipv4" -) - -type ipv4HeaderTest struct { - wireHeaderFromKernel [ipv4.HeaderLen]byte - wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte - Header *ipv4.Header -} - -var ipv4HeaderLittleEndianTest = ipv4HeaderTest{ - // TODO(mikio): Add platform dependent wire header formats when - // we support new platforms. 
- wireHeaderFromKernel: [ipv4.HeaderLen]byte{ - 0x45, 0x01, 0xbe, 0xef, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{ - 0x45, 0x01, 0xef, 0xbe, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - Header: &ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TOS: 1, - TotalLen: 0xbeef, - ID: 0xcafe, - Flags: ipv4.DontFragment, - FragOff: 1500, - TTL: 255, - Protocol: 1, - Checksum: 0xdead, - Src: net.IPv4(172, 16, 254, 254), - Dst: net.IPv4(192, 168, 0, 1), - }, -} - -func TestParseIPv4Header(t *testing.T) { - tt := &ipv4HeaderLittleEndianTest - if socket.NativeEndian != binary.LittleEndian { - t.Skip("no test for non-little endian machine yet") - } - - var wh []byte - switch runtime.GOOS { - case "darwin": - wh = tt.wireHeaderFromTradBSDKernel[:] - case "freebsd": - if freebsdVersion >= 1000000 { - wh = tt.wireHeaderFromKernel[:] - } else { - wh = tt.wireHeaderFromTradBSDKernel[:] - } - default: - wh = tt.wireHeaderFromKernel[:] - } - h, err := ParseIPv4Header(wh) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(h, tt.Header) { - t.Fatalf("got %#v; want %#v", h, tt.Header) - } -} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go deleted file mode 100644 index 2e8cfeb1..00000000 --- a/vendor/golang.org/x/net/icmp/ipv6.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import ( - "net" - - "golang.org/x/net/internal/iana" -) - -const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 - -// IPv6PseudoHeader returns an IPv6 pseudo header for checksum -// calculation. -func IPv6PseudoHeader(src, dst net.IP) []byte { - b := make([]byte, ipv6PseudoHeaderLen) - copy(b, src.To16()) - copy(b[net.IPv6len:], dst.To16()) - b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) - return b -} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go deleted file mode 100644 index 7fac4f96..00000000 --- a/vendor/golang.org/x/net/icmp/listen_posix.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows - -package icmp - -import ( - "net" - "os" - "runtime" - "syscall" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option - -// ListenPacket listens for incoming ICMP packets addressed to -// address. See net.Dial for the syntax of address. -// -// For non-privileged datagram-oriented ICMP endpoints, network must -// be "udp4" or "udp6". The endpoint allows to read, write a few -// limited ICMP messages such as echo request and echo reply. -// Currently only Darwin and Linux support this. -// -// Examples: -// ListenPacket("udp4", "192.168.0.1") -// ListenPacket("udp4", "0.0.0.0") -// ListenPacket("udp6", "fe80::1%en0") -// ListenPacket("udp6", "::") -// -// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" -// followed by a colon and an ICMP protocol number or name. 
-// -// Examples: -// ListenPacket("ip4:icmp", "192.168.0.1") -// ListenPacket("ip4:1", "0.0.0.0") -// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") -// ListenPacket("ip6:58", "::") -func ListenPacket(network, address string) (*PacketConn, error) { - var family, proto int - switch network { - case "udp4": - family, proto = syscall.AF_INET, iana.ProtocolICMP - case "udp6": - family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP - default: - i := last(network, ':') - switch network[:i] { - case "ip4": - proto = iana.ProtocolICMP - case "ip6": - proto = iana.ProtocolIPv6ICMP - } - } - var cerr error - var c net.PacketConn - switch family { - case syscall.AF_INET, syscall.AF_INET6: - s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - if runtime.GOOS == "darwin" && family == syscall.AF_INET { - if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { - syscall.Close(s) - return nil, os.NewSyscallError("setsockopt", err) - } - } - sa, err := sockaddr(family, address) - if err != nil { - syscall.Close(s) - return nil, err - } - if err := syscall.Bind(s, sa); err != nil { - syscall.Close(s) - return nil, os.NewSyscallError("bind", err) - } - f := os.NewFile(uintptr(s), "datagram-oriented icmp") - c, cerr = net.FilePacketConn(f) - f.Close() - default: - c, cerr = net.ListenPacket(network, address) - } - if cerr != nil { - return nil, cerr - } - switch proto { - case iana.ProtocolICMP: - return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil - case iana.ProtocolIPv6ICMP: - return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil - default: - return &PacketConn{c: c}, nil - } -} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go deleted file mode 100644 index 668728d1..00000000 --- a/vendor/golang.org/x/net/icmp/listen_stub.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build nacl plan9 - -package icmp - -// ListenPacket listens for incoming ICMP packets addressed to -// address. See net.Dial for the syntax of address. -// -// For non-privileged datagram-oriented ICMP endpoints, network must -// be "udp4" or "udp6". The endpoint allows to read, write a few -// limited ICMP messages such as echo request and echo reply. -// Currently only Darwin and Linux support this. -// -// Examples: -// ListenPacket("udp4", "192.168.0.1") -// ListenPacket("udp4", "0.0.0.0") -// ListenPacket("udp6", "fe80::1%en0") -// ListenPacket("udp6", "::") -// -// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" -// followed by a colon and an ICMP protocol number or name. -// -// Examples: -// ListenPacket("ip4:icmp", "192.168.0.1") -// ListenPacket("ip4:1", "0.0.0.0") -// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") -// ListenPacket("ip6:58", "::") -func ListenPacket(network, address string) (*PacketConn, error) { - return nil, errOpNoSupport -} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go deleted file mode 100644 index 81140b0d..00000000 --- a/vendor/golang.org/x/net/icmp/message.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package icmp provides basic functions for the manipulation of -// messages used in the Internet Control Message Protocols, -// ICMPv4 and ICMPv6. -// -// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. -// Multi-part message support for ICMP is defined in RFC 4884. -// ICMP extensions for MPLS are defined in RFC 4950. -// ICMP extensions for interface and next-hop identification are -// defined in RFC 5837. -package icmp // import "golang.org/x/net/icmp" - -import ( - "encoding/binary" - "errors" - "net" - "syscall" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -// BUG(mikio): This package is not implemented on NaCl and Plan 9. - -var ( - errMessageTooShort = errors.New("message too short") - errHeaderTooShort = errors.New("header too short") - errBufferTooShort = errors.New("buffer too short") - errOpNoSupport = errors.New("operation not supported") - errNoExtension = errors.New("no extension") - errInvalidExtension = errors.New("invalid extension") -) - -func checksum(b []byte) uint16 { - csumcv := len(b) - 1 // checksum coverage - s := uint32(0) - for i := 0; i < csumcv; i += 2 { - s += uint32(b[i+1])<<8 | uint32(b[i]) - } - if csumcv&1 == 0 { - s += uint32(b[csumcv]) - } - s = s>>16 + s&0xffff - s = s + s>>16 - return ^uint16(s) -} - -// A Type represents an ICMP message type. -type Type interface { - Protocol() int -} - -// A Message represents an ICMP message. -type Message struct { - Type Type // type, either ipv4.ICMPType or ipv6.ICMPType - Code int // code - Checksum int // checksum - Body MessageBody // body -} - -// Marshal returns the binary encoding of the ICMP message m. -// -// For an ICMPv4 message, the returned message always contains the -// calculated checksum field. -// -// For an ICMPv6 message, the returned message contains the calculated -// checksum field when psh is not nil, otherwise the kernel will -// compute the checksum field during the message transmission. -// When psh is not nil, it must be the pseudo header for IPv6. -func (m *Message) Marshal(psh []byte) ([]byte, error) { - var mtype int - switch typ := m.Type.(type) { - case ipv4.ICMPType: - mtype = int(typ) - case ipv6.ICMPType: - mtype = int(typ) - default: - return nil, syscall.EINVAL - } - b := []byte{byte(mtype), byte(m.Code), 0, 0} - if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil { - b = append(psh, b...) - } - if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 { - mb, err := m.Body.Marshal(m.Type.Protocol()) - if err != nil { - return nil, err - } - b = append(b, mb...) - } - if m.Type.Protocol() == iana.ProtocolIPv6ICMP { - if psh == nil { // cannot calculate checksum here - return b, nil - } - off, l := 2*net.IPv6len, len(b)-len(psh) - binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) - } - s := checksum(b) - // Place checksum back in header; using ^= avoids the - // assumption the checksum bytes are zero. 
- b[len(psh)+2] ^= byte(s) - b[len(psh)+3] ^= byte(s >> 8) - return b[len(psh):], nil -} - -var parseFns = map[Type]func(int, []byte) (MessageBody, error){ - ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, - ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, - ipv4.ICMPTypeParameterProblem: parseParamProb, - - ipv4.ICMPTypeEcho: parseEcho, - ipv4.ICMPTypeEchoReply: parseEcho, - - ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, - ipv6.ICMPTypePacketTooBig: parsePacketTooBig, - ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, - ipv6.ICMPTypeParameterProblem: parseParamProb, - - ipv6.ICMPTypeEchoRequest: parseEcho, - ipv6.ICMPTypeEchoReply: parseEcho, -} - -// ParseMessage parses b as an ICMP message. -// Proto must be either the ICMPv4 or ICMPv6 protocol number. -func ParseMessage(proto int, b []byte) (*Message, error) { - if len(b) < 4 { - return nil, errMessageTooShort - } - var err error - m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} - switch proto { - case iana.ProtocolICMP: - m.Type = ipv4.ICMPType(b[0]) - case iana.ProtocolIPv6ICMP: - m.Type = ipv6.ICMPType(b[0]) - default: - return nil, syscall.EINVAL - } - if fn, ok := parseFns[m.Type]; !ok { - m.Body, err = parseDefaultMessageBody(proto, b[4:]) - } else { - m.Body, err = fn(proto, b[4:]) - } - if err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go deleted file mode 100644 index 5d2605f8..00000000 --- a/vendor/golang.org/x/net/icmp/message_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp_test - -import ( - "net" - "reflect" - "testing" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -var marshalAndParseMessageForIPv4Tests = []icmp.Message{ - { - Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, - Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv4.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv4.ICMPTypeParameterProblem, Code: 2, - Body: &icmp.ParamProb{ - Pointer: 8, - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv4.ICMPTypeEcho, Code: 0, - Body: &icmp.Echo{ - ID: 1, Seq: 2, - Data: []byte("HELLO-R-U-THERE"), - }, - }, - { - Type: ipv4.ICMPTypePhoturis, - Body: &icmp.DefaultMessageBody{ - Data: []byte{0x80, 0x40, 0x20, 0x10}, - }, - }, -} - -func TestMarshalAndParseMessageForIPv4(t *testing.T) { - for i, tt := range marshalAndParseMessageForIPv4Tests { - b, err := tt.Marshal(nil) - if err != nil { - t.Fatal(err) - } - m, err := icmp.ParseMessage(iana.ProtocolICMP, b) - if err != nil { - t.Fatal(err) - } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) - } - if !reflect.DeepEqual(m.Body, tt.Body) { - t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) - } - } -} - -var marshalAndParseMessageForIPv6Tests = []icmp.Message{ - { - Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, - Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypePacketTooBig, Code: 0, - Body: &icmp.PacketTooBig{ - MTU: 1<<16 - 1, - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - 
Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypeParameterProblem, Code: 2, - Body: &icmp.ParamProb{ - Pointer: 8, - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypeEchoRequest, Code: 0, - Body: &icmp.Echo{ - ID: 1, Seq: 2, - Data: []byte("HELLO-R-U-THERE"), - }, - }, - { - Type: ipv6.ICMPTypeDuplicateAddressConfirmation, - Body: &icmp.DefaultMessageBody{ - Data: []byte{0x80, 0x40, 0x20, 0x10}, - }, - }, -} - -func TestMarshalAndParseMessageForIPv6(t *testing.T) { - pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) - for i, tt := range marshalAndParseMessageForIPv6Tests { - for _, psh := range [][]byte{pshicmp, nil} { - b, err := tt.Marshal(psh) - if err != nil { - t.Fatal(err) - } - m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) - if err != nil { - t.Fatal(err) - } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) - } - if !reflect.DeepEqual(m.Body, tt.Body) { - t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) - } - } - } -} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go deleted file mode 100644 index 2463730a..00000000 --- a/vendor/golang.org/x/net/icmp/messagebody.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -// A MessageBody represents an ICMP message body. -type MessageBody interface { - // Len returns the length of ICMP message body. - // Proto must be either the ICMPv4 or ICMPv6 protocol number. - Len(proto int) int - - // Marshal returns the binary encoding of ICMP message body. - // Proto must be either the ICMPv4 or ICMPv6 protocol number. - Marshal(proto int) ([]byte, error) -} - -// A DefaultMessageBody represents the default message body. -type DefaultMessageBody struct { - Data []byte // data -} - -// Len implements the Len method of MessageBody interface. -func (p *DefaultMessageBody) Len(proto int) int { - if p == nil { - return 0 - } - return len(p.Data) -} - -// Marshal implements the Marshal method of MessageBody interface. -func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) { - return p.Data, nil -} - -// parseDefaultMessageBody parses b as an ICMP message body. -func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) { - p := &DefaultMessageBody{Data: make([]byte, len(b))} - copy(p.Data, b) - return p, nil -} diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go deleted file mode 100644 index c3149174..00000000 --- a/vendor/golang.org/x/net/icmp/mpls.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import "encoding/binary" - -// A MPLSLabel represents a MPLS label stack entry. -type MPLSLabel struct { - Label int // label value - TC int // traffic class; formerly experimental use - S bool // bottom of stack - TTL int // time to live -} - -const ( - classMPLSLabelStack = 1 - typeIncomingMPLSLabelStack = 1 -) - -// A MPLSLabelStack represents a MPLS label stack. -type MPLSLabelStack struct { - Class int // extension object class number - Type int // extension object sub-type - Labels []MPLSLabel -} - -// Len implements the Len method of Extension interface. 
-func (ls *MPLSLabelStack) Len(proto int) int { - return 4 + (4 * len(ls.Labels)) -} - -// Marshal implements the Marshal method of Extension interface. -func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { - b := make([]byte, ls.Len(proto)) - if err := ls.marshal(proto, b); err != nil { - return nil, err - } - return b, nil -} - -func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { - l := ls.Len(proto) - binary.BigEndian.PutUint16(b[:2], uint16(l)) - b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack - off := 4 - for _, ll := range ls.Labels { - b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) - b[off+2] |= byte(ll.TC << 1 & 0x0e) - if ll.S { - b[off+2] |= 0x1 - } - b[off+3] = byte(ll.TTL) - off += 4 - } - return nil -} - -func parseMPLSLabelStack(b []byte) (Extension, error) { - ls := &MPLSLabelStack{ - Class: int(b[2]), - Type: int(b[3]), - } - for b = b[4:]; len(b) >= 4; b = b[4:] { - ll := MPLSLabel{ - Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, - TC: int(b[2]&0x0e) >> 1, - TTL: int(b[3]), - } - if b[2]&0x1 != 0 { - ll.S = true - } - ls.Labels = append(ls.Labels, ll) - } - return ls, nil -} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go deleted file mode 100644 index f2713566..00000000 --- a/vendor/golang.org/x/net/icmp/multipart.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import "golang.org/x/net/internal/iana" - -// multipartMessageBodyDataLen takes b as an original datagram and -// exts as extensions, and returns a required length for message body -// and a required length for a padded original datagram in wire -// format. -func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) { - for _, ext := range exts { - bodyLen += ext.Len(proto) - } - if bodyLen > 0 { - dataLen = multipartMessageOrigDatagramLen(proto, b) - bodyLen += 4 // length of extension header - } else { - dataLen = len(b) - } - bodyLen += dataLen - return bodyLen, dataLen -} - -// multipartMessageOrigDatagramLen takes b as an original datagram, -// and returns a required length for a padded orignal datagram in wire -// format. -func multipartMessageOrigDatagramLen(proto int, b []byte) int { - roundup := func(b []byte, align int) int { - // According to RFC 4884, the padded original datagram - // field must contain at least 128 octets. - if len(b) < 128 { - return 128 - } - r := len(b) - return (r + align - 1) & ^(align - 1) - } - switch proto { - case iana.ProtocolICMP: - return roundup(b, 4) - case iana.ProtocolIPv6ICMP: - return roundup(b, 8) - default: - return len(b) - } -} - -// marshalMultipartMessageBody takes data as an original datagram and -// exts as extesnsions, and returns a binary encoding of message body. -// It can be used for non-multipart message bodies when exts is nil. 
-func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) { - bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts) - b := make([]byte, 4+bodyLen) - copy(b[4:], data) - off := dataLen + 4 - if len(exts) > 0 { - b[dataLen+4] = byte(extensionVersion << 4) - off += 4 // length of object header - for _, ext := range exts { - switch ext := ext.(type) { - case *MPLSLabelStack: - if err := ext.marshal(proto, b[off:]); err != nil { - return nil, err - } - off += ext.Len(proto) - case *InterfaceInfo: - attrs, l := ext.attrsAndLen(proto) - if err := ext.marshal(proto, b[off:], attrs, l); err != nil { - return nil, err - } - off += ext.Len(proto) - } - } - s := checksum(b[dataLen+4:]) - b[dataLen+4+2] ^= byte(s) - b[dataLen+4+3] ^= byte(s >> 8) - switch proto { - case iana.ProtocolICMP: - b[1] = byte(dataLen / 4) - case iana.ProtocolIPv6ICMP: - b[0] = byte(dataLen / 8) - } - } - return b, nil -} - -// parseMultipartMessageBody parses b as either a non-multipart -// message body or a multipart message body. -func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) { - var l int - switch proto { - case iana.ProtocolICMP: - l = 4 * int(b[1]) - case iana.ProtocolIPv6ICMP: - l = 8 * int(b[0]) - } - if len(b) == 4 { - return nil, nil, nil - } - exts, l, err := parseExtensions(b[4:], l) - if err != nil { - l = len(b) - 4 - } - data := make([]byte, l) - copy(data, b[4:]) - return data, exts, nil -} diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go deleted file mode 100644 index 966ccb8d..00000000 --- a/vendor/golang.org/x/net/icmp/multipart_test.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package icmp_test - -import ( - "fmt" - "net" - "reflect" - "testing" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ - { - Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, - Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 1).To4(), - }, - }, - }, - }, - }, - { - Type: ipv4.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 1).To4(), - }, - }, - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - }, - }, - }, - { - Type: ipv4.ICMPTypeParameterProblem, Code: 2, - Body: &icmp.ParamProb{ - Pointer: 8, - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 1).To4(), - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x2f, - Interface: &net.Interface{ - Index: 16, - Name: "en102", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 2).To4(), - }, - }, - }, - }, - }, -} - -func TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) { - for i, tt := range marshalAndParseMultipartMessageForIPv4Tests { - b, err := tt.Marshal(nil) - if err != nil { - t.Fatal(err) - } - if b[5] != 32 { - t.Errorf("#%v: got %v; want 32", i, b[5]) - } - m, err := icmp.ParseMessage(iana.ProtocolICMP, b) - if err != nil { - t.Fatal(err) - } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) - } - switch m.Type { - case ipv4.ICMPTypeDestinationUnreachable: - got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) - if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) - } - if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) - } - case ipv4.ICMPTypeTimeExceeded: - got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) - if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) - } - if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) - } - case ipv4.ICMPTypeParameterProblem: - got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb) - if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) - } - if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) - } - } - } -} - -var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{ - { - Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, 
- Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en101", - }, - }, - }, - }, - }, - { - Type: ipv6.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en101", - }, - }, - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x2f, - Interface: &net.Interface{ - Index: 16, - Name: "en102", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en102", - }, - }, - }, - }, - }, -} - -func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) { - pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) - for i, tt := range marshalAndParseMultipartMessageForIPv6Tests { - for _, psh := range [][]byte{pshicmp, nil} { - b, err := tt.Marshal(psh) - if err != nil { - t.Fatal(err) - } - if b[4] != 16 { - t.Errorf("#%v: got %v; want 16", i, b[4]) - } - m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) - if err != nil { - t.Fatal(err) - } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) - } - switch m.Type { - case ipv6.ICMPTypeDestinationUnreachable: - got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) - if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) - } - if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) - } - case ipv6.ICMPTypeTimeExceeded: - got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) - if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) - } - if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) - } - } - } - } -} - -func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string { - var s string - for j, got := range gotExts { - switch got := got.(type) { - case *icmp.MPLSLabelStack: - want := wantExts[j].(*icmp.MPLSLabelStack) - if !reflect.DeepEqual(got, want) { - s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want) - } - case *icmp.InterfaceInfo: - want := wantExts[j].(*icmp.InterfaceInfo) - if !reflect.DeepEqual(got, want) { - s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr) - } - } - } - return s[:len(s)-1] -} - -var multipartMessageBodyLenTests = []struct { - proto int - in icmp.MessageBody - out int -}{ - { - iana.ProtocolICMP, - &icmp.DstUnreach{ - Data: make([]byte, ipv4.HeaderLen), - }, - 4 + ipv4.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolICMP, - &icmp.TimeExceeded{ - Data: make([]byte, ipv4.HeaderLen), - }, - 4 + ipv4.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, ipv4.HeaderLen), - }, - 4 + 
ipv4.HeaderLen, // [pointer, unused] and original datagram - }, - - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, ipv4.HeaderLen), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram - }, - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, 128), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram - }, - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, 129), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram - }, - - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.PacketTooBig{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // mtu and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.TimeExceeded{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.ParamProb{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // pointer and original datagram - }, - - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, 127), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, 128), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, 129), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram - }, -} - -func TestMultipartMessageBodyLen(t *testing.T) { - for i, tt := range multipartMessageBodyLenTests { - if out := tt.in.Len(tt.proto); out != tt.out { - t.Errorf("#%d: got %d; want %d", i, out, tt.out) - } - } -} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go deleted file mode 100644 index a1c9df7b..00000000 --- a/vendor/golang.org/x/net/icmp/packettoobig.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import "encoding/binary" - -// A PacketTooBig represents an ICMP packet too big message body. -type PacketTooBig struct { - MTU int // maximum transmission unit of the nexthop link - Data []byte // data, known as original datagram field -} - -// Len implements the Len method of MessageBody interface. -func (p *PacketTooBig) Len(proto int) int { - if p == nil { - return 0 - } - return 4 + len(p.Data) -} - -// Marshal implements the Marshal method of MessageBody interface. 
-func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { - b := make([]byte, 4+len(p.Data)) - binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) - copy(b[4:], p.Data) - return b, nil -} - -// parsePacketTooBig parses b as an ICMP packet too big message body. -func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { - bodyLen := len(b) - if bodyLen < 4 { - return nil, errMessageTooShort - } - p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} - if bodyLen > 4 { - p.Data = make([]byte, bodyLen-4) - copy(p.Data, b[4:]) - } - return p, nil -} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go deleted file mode 100644 index 0a2548da..00000000 --- a/vendor/golang.org/x/net/icmp/paramprob.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import ( - "encoding/binary" - "golang.org/x/net/internal/iana" -) - -// A ParamProb represents an ICMP parameter problem message body. -type ParamProb struct { - Pointer uintptr // offset within the data where the error was detected - Data []byte // data, known as original datagram field - Extensions []Extension // extensions -} - -// Len implements the Len method of MessageBody interface. -func (p *ParamProb) Len(proto int) int { - if p == nil { - return 0 - } - l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) - return 4 + l -} - -// Marshal implements the Marshal method of MessageBody interface. -func (p *ParamProb) Marshal(proto int) ([]byte, error) { - if proto == iana.ProtocolIPv6ICMP { - b := make([]byte, p.Len(proto)) - binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) - copy(b[4:], p.Data) - return b, nil - } - b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions) - if err != nil { - return nil, err - } - b[0] = byte(p.Pointer) - return b, nil -} - -// parseParamProb parses b as an ICMP parameter problem message body. -func parseParamProb(proto int, b []byte) (MessageBody, error) { - if len(b) < 4 { - return nil, errMessageTooShort - } - p := &ParamProb{} - if proto == iana.ProtocolIPv6ICMP { - p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) - p.Data = make([]byte, len(b)-4) - copy(p.Data, b[4:]) - return p, nil - } - p.Pointer = uintptr(b[0]) - var err error - p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) - if err != nil { - return nil, err - } - return p, nil -} diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go deleted file mode 100644 index 3171dad1..00000000 --- a/vendor/golang.org/x/net/icmp/ping_test.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package icmp_test - -import ( - "errors" - "fmt" - "net" - "os" - "runtime" - "sync" - "testing" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { - const host = "www.google.com" - ips, err := net.LookupIP(host) - if err != nil { - return nil, err - } - netaddr := func(ip net.IP) (net.Addr, error) { - switch c.LocalAddr().(type) { - case *net.UDPAddr: - return &net.UDPAddr{IP: ip}, nil - case *net.IPAddr: - return &net.IPAddr{IP: ip}, nil - default: - return nil, errors.New("neither UDPAddr nor IPAddr") - } - } - for _, ip := range ips { - switch protocol { - case iana.ProtocolICMP: - if ip.To4() != nil { - return netaddr(ip) - } - case iana.ProtocolIPv6ICMP: - if ip.To16() != nil && ip.To4() == nil { - return netaddr(ip) - } - } - } - return nil, errors.New("no A or AAAA record") -} - -type pingTest struct { - network, address string - protocol int - mtype icmp.Type -} - -var nonPrivilegedPingTests = []pingTest{ - {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, - - {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, -} - -func TestNonPrivilegedPing(t *testing.T) { - if testing.Short() { - t.Skip("avoid external network") - } - switch runtime.GOOS { - case "darwin": - case "linux": - t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") - default: - t.Skipf("not supported on %s", runtime.GOOS) - } - - for i, tt := range nonPrivilegedPingTests { - if err := doPing(tt, i); err != nil { - t.Error(err) - } - } -} - -var privilegedPingTests = []pingTest{ - {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, - - {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, -} - -func TestPrivilegedPing(t *testing.T) { - if testing.Short() { - t.Skip("avoid external network") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - for i, tt := range privilegedPingTests { - if err := doPing(tt, i); err != nil { - t.Error(err) - } - } -} - -func doPing(tt pingTest, seq int) error { - c, err := icmp.ListenPacket(tt.network, tt.address) - if err != nil { - return err - } - defer c.Close() - - dst, err := googleAddr(c, tt.protocol) - if err != nil { - return err - } - - if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { - var f ipv6.ICMPFilter - f.SetAll(true) - f.Accept(ipv6.ICMPTypeDestinationUnreachable) - f.Accept(ipv6.ICMPTypePacketTooBig) - f.Accept(ipv6.ICMPTypeTimeExceeded) - f.Accept(ipv6.ICMPTypeParameterProblem) - f.Accept(ipv6.ICMPTypeEchoReply) - if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { - return err - } - } - - wm := icmp.Message{ - Type: tt.mtype, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), - Data: []byte("HELLO-R-U-THERE"), - }, - } - wb, err := wm.Marshal(nil) - if err != nil { - return err - } - if n, err := c.WriteTo(wb, dst); err != nil { - return err - } else if n != len(wb) { - return fmt.Errorf("got %v; want %v", n, len(wb)) - } - - rb := make([]byte, 1500) - if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { - return err - } - n, peer, err := c.ReadFrom(rb) - if err != nil { - return err - } - rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) - if err != nil { - return err - } - switch rm.Type { - case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: - return nil - default: - return fmt.Errorf("got %+v from %v; want 
echo reply", rm, peer) - } -} - -func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { - if testing.Short() { - t.Skip("avoid external network") - } - switch runtime.GOOS { - case "darwin": - case "linux": - t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") - default: - t.Skipf("not supported on %s", runtime.GOOS) - } - - network, address := "udp4", "127.0.0.1" - if !nettest.SupportsIPv4() { - network, address = "udp6", "::1" - } - const N = 1000 - var wg sync.WaitGroup - wg.Add(N) - for i := 0; i < N; i++ { - go func() { - defer wg.Done() - c, err := icmp.ListenPacket(network, address) - if err != nil { - t.Error(err) - return - } - c.Close() - }() - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go deleted file mode 100644 index c75f3dda..00000000 --- a/vendor/golang.org/x/net/icmp/sys_freebsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -import "syscall" - -func init() { - freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") -} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go deleted file mode 100644 index 344e1584..00000000 --- a/vendor/golang.org/x/net/icmp/timeexceeded.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package icmp - -// A TimeExceeded represents an ICMP time exceeded message body. -type TimeExceeded struct { - Data []byte // data, known as original datagram field - Extensions []Extension // extensions -} - -// Len implements the Len method of MessageBody interface. -func (p *TimeExceeded) Len(proto int) int { - if p == nil { - return 0 - } - l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) - return 4 + l -} - -// Marshal implements the Marshal method of MessageBody interface. -func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { - return marshalMultipartMessageBody(proto, p.Data, p.Extensions) -} - -// parseTimeExceeded parses b as an ICMP time exceeded message body. -func parseTimeExceeded(proto int, b []byte) (MessageBody, error) { - if len(b) < 4 { - return nil, errMessageTooShort - } - p := &TimeExceeded{} - var err error - p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) - if err != nil { - return nil, err - } - return p, nil -} diff --git a/vendor/golang.org/x/net/idna/example_test.go b/vendor/golang.org/x/net/idna/example_test.go deleted file mode 100644 index 948f6eb2..00000000 --- a/vendor/golang.org/x/net/idna/example_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna_test - -import ( - "fmt" - - "golang.org/x/net/idna" -) - -func ExampleProfile() { - // Raw Punycode has no restrictions and does no mappings. - fmt.Println(idna.ToASCII("")) - fmt.Println(idna.ToASCII("*.faß.com")) - fmt.Println(idna.Punycode.ToASCII("*.faß.com")) - - // Rewrite IDN for lookup. This (currently) uses transitional mappings to - // find a balance between IDNA2003 and IDNA2008 compatibility. 
- fmt.Println(idna.Lookup.ToASCII("")) - fmt.Println(idna.Lookup.ToASCII("www.faß.com")) - - // Convert an IDN to ASCII for registration purposes. This changes the - // encoding, but reports an error if the input was illformed. - fmt.Println(idna.Registration.ToASCII("")) - fmt.Println(idna.Registration.ToASCII("www.faß.com")) - - // Output: - // - // *.xn--fa-hia.com - // *.xn--fa-hia.com - // - // www.fass.com - // idna: invalid label "" - // www.xn--fa-hia.com -} - -func ExampleNew() { - var p *idna.Profile - - // Raw Punycode has no restrictions and does no mappings. - p = idna.New() - fmt.Println(p.ToASCII("*.faß.com")) - - // Do mappings. Note that star is not allowed in a DNS lookup. - p = idna.New( - idna.MapForLookup(), - idna.Transitional(true)) // Map ß -> ss - fmt.Println(p.ToASCII("*.faß.com")) - - // Lookup for registration. Also does not allow '*'. - p = idna.New(idna.ValidateForRegistration()) - fmt.Println(p.ToUnicode("*.faß.com")) - - // Set up a profile maps for lookup, but allows wild cards. - p = idna.New( - idna.MapForLookup(), - idna.Transitional(true), // Map ß -> ss - idna.StrictDomainName(false)) // Set more permissive ASCII rules. - fmt.Println(p.ToASCII("*.faß.com")) - - // Output: - // *.xn--fa-hia.com - // *.fass.com idna: disallowed rune U+002A - // *.faß.com idna: disallowed rune U+002A - // *.fass.com -} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go deleted file mode 100644 index ec8232b2..00000000 --- a/vendor/golang.org/x/net/idna/idna.go +++ /dev/null @@ -1,680 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package idna implements IDNA2008 using the compatibility processing -// defined by UTS (Unicode Technical Standard) #46, which defines a standard to -// deal with the transition from IDNA2003. -// -// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC -// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. -// UTS #46 is defined in http://www.unicode.org/reports/tr46. -// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the -// differences between these two standards. -package idna // import "golang.org/x/net/idna" - -import ( - "fmt" - "strings" - "unicode/utf8" - - "golang.org/x/text/secure/bidirule" - "golang.org/x/text/unicode/norm" -) - -// NOTE: Unlike common practice in Go APIs, the functions will return a -// sanitized domain name in case of errors. Browsers sometimes use a partially -// evaluated string as lookup. -// TODO: the current error handling is, in my opinion, the least opinionated. -// Other strategies are also viable, though: -// Option 1) Return an empty string in case of error, but allow the user to -// specify explicitly which errors to ignore. -// Option 2) Return the partially evaluated string if it is itself a valid -// string, otherwise return the empty string in case of error. -// Option 3) Option 1 and 2. -// Option 4) Always return an empty string for now and implement Option 1 as -// needed, and document that the return string may not be empty in case of -// error in the future. -// I think Option 1 is best, but it is quite opinionated. - -// ToASCII is a wrapper for Punycode.ToASCII. -func ToASCII(s string) (string, error) { - return Punycode.process(s, true) -} - -// ToUnicode is a wrapper for Punycode.ToUnicode. 
-func ToUnicode(s string) (string, error) { - return Punycode.process(s, false) -} - -// An Option configures a Profile at creation time. -type Option func(*options) - -// Transitional sets a Profile to use the Transitional mapping as defined in UTS -// #46. This will cause, for example, "ß" to be mapped to "ss". Using the -// transitional mapping provides a compromise between IDNA2003 and IDNA2008 -// compatibility. It is used by most browsers when resolving domain names. This -// option is only meaningful if combined with MapForLookup. -func Transitional(transitional bool) Option { - return func(o *options) { o.transitional = true } -} - -// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts -// are longer than allowed by the RFC. -func VerifyDNSLength(verify bool) Option { - return func(o *options) { o.verifyDNSLength = verify } -} - -// RemoveLeadingDots removes leading label separators. Leading runes that map to -// dots, such as U+3002, are removed as well. -// -// This is the behavior suggested by the UTS #46 and is adopted by some -// browsers. -func RemoveLeadingDots(remove bool) Option { - return func(o *options) { o.removeLeadingDots = remove } -} - -// ValidateLabels sets whether to check the mandatory label validation criteria -// as defined in Section 5.4 of RFC 5891. This includes testing for correct use -// of hyphens ('-'), normalization, validity of runes, and the context rules. -func ValidateLabels(enable bool) Option { - return func(o *options) { - // Don't override existing mappings, but set one that at least checks - // normalization if it is not set. - if o.mapping == nil && enable { - o.mapping = normalize - } - o.trie = trie - o.validateLabels = enable - o.fromPuny = validateFromPunycode - } -} - -// StrictDomainName limits the set of permissable ASCII characters to those -// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the -// hyphen). This is set by default for MapForLookup and ValidateForRegistration. -// -// This option is useful, for instance, for browsers that allow characters -// outside this range, for example a '_' (U+005F LOW LINE). See -// http://www.rfc-editor.org/std/std3.txt for more details This option -// corresponds to the UseSTD3ASCIIRules option in UTS #46. -func StrictDomainName(use bool) Option { - return func(o *options) { - o.trie = trie - o.useSTD3Rules = use - o.fromPuny = validateFromPunycode - } -} - -// NOTE: the following options pull in tables. The tables should not be linked -// in as long as the options are not used. - -// BidiRule enables the Bidi rule as defined in RFC 5893. Any application -// that relies on proper validation of labels should include this rule. -func BidiRule() Option { - return func(o *options) { o.bidirule = bidirule.ValidString } -} - -// ValidateForRegistration sets validation options to verify that a given IDN is -// properly formatted for registration as defined by Section 4 of RFC 5891. -func ValidateForRegistration() Option { - return func(o *options) { - o.mapping = validateRegistration - StrictDomainName(true)(o) - ValidateLabels(true)(o) - VerifyDNSLength(true)(o) - BidiRule()(o) - } -} - -// MapForLookup sets validation and mapping options such that a given IDN is -// transformed for domain name lookup according to the requirements set out in -// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, -// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option -// to add this check. 
-// -// The mappings include normalization and mapping case, width and other -// compatibility mappings. -func MapForLookup() Option { - return func(o *options) { - o.mapping = validateAndMap - StrictDomainName(true)(o) - ValidateLabels(true)(o) - RemoveLeadingDots(true)(o) - } -} - -type options struct { - transitional bool - useSTD3Rules bool - validateLabels bool - verifyDNSLength bool - removeLeadingDots bool - - trie *idnaTrie - - // fromPuny calls validation rules when converting A-labels to U-labels. - fromPuny func(p *Profile, s string) error - - // mapping implements a validation and mapping step as defined in RFC 5895 - // or UTS 46, tailored to, for example, domain registration or lookup. - mapping func(p *Profile, s string) (string, error) - - // bidirule, if specified, checks whether s conforms to the Bidi Rule - // defined in RFC 5893. - bidirule func(s string) bool -} - -// A Profile defines the configuration of an IDNA mapper. -type Profile struct { - options -} - -func apply(o *options, opts []Option) { - for _, f := range opts { - f(o) - } -} - -// New creates a new Profile. -// -// With no options, the returned Profile is the most permissive and equals the -// Punycode Profile. Options can be passed to further restrict the Profile. The -// MapForLookup and ValidateForRegistration options set a collection of options, -// for lookup and registration purposes respectively, which can be tailored by -// adding more fine-grained options, where later options override earlier -// options. -func New(o ...Option) *Profile { - p := &Profile{} - apply(&p.options, o) - return p -} - -// ToASCII converts a domain or domain label to its ASCII form. For example, -// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". If an error is encountered it will return -// an error and a (partially) processed result. -func (p *Profile) ToASCII(s string) (string, error) { - return p.process(s, true) -} - -// ToUnicode converts a domain or domain label to its Unicode form. For example, -// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". If an error is encountered it will return -// an error and a (partially) processed result. -func (p *Profile) ToUnicode(s string) (string, error) { - pp := *p - pp.transitional = false - return pp.process(s, false) -} - -// String reports a string with a description of the profile for debugging -// purposes. The string format may change with different versions. -func (p *Profile) String() string { - s := "" - if p.transitional { - s = "Transitional" - } else { - s = "NonTransitional" - } - if p.useSTD3Rules { - s += ":UseSTD3Rules" - } - if p.validateLabels { - s += ":ValidateLabels" - } - if p.verifyDNSLength { - s += ":VerifyDNSLength" - } - return s -} - -var ( - // Punycode is a Profile that does raw punycode processing with a minimum - // of validation. - Punycode *Profile = punycode - - // Lookup is the recommended profile for looking up domain names, according - // to Section 5 of RFC 5891. The exact configuration of this profile may - // change over time. - Lookup *Profile = lookup - - // Display is the recommended profile for displaying domain names. - // The configuration of this profile may change over time. - Display *Profile = display - - // Registration is the recommended profile for checking whether a given - // IDN is valid for registration, according to Section 4 of RFC 5891. 
- Registration *Profile = registration - - punycode = &Profile{} - lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, - }} - display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, - }} - registration = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - verifyDNSLength: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateRegistration, - bidirule: bidirule.ValidString, - }} - - // TODO: profiles - // Register: recommended for approving domain names: don't do any mappings - // but rather reject on invalid input. Bundle or block deviation characters. -) - -type labelError struct{ label, code_ string } - -func (e labelError) code() string { return e.code_ } -func (e labelError) Error() string { - return fmt.Sprintf("idna: invalid label %q", e.label) -} - -type runeError rune - -func (e runeError) code() string { return "P1" } -func (e runeError) Error() string { - return fmt.Sprintf("idna: disallowed rune %U", e) -} - -// process implements the algorithm described in section 4 of UTS #46, -// see http://www.unicode.org/reports/tr46. -func (p *Profile) process(s string, toASCII bool) (string, error) { - var err error - if p.mapping != nil { - s, err = p.mapping(p, s) - } - // Remove leading empty labels. - if p.removeLeadingDots { - for ; len(s) > 0 && s[0] == '.'; s = s[1:] { - } - } - // It seems like we should only create this error on ToASCII, but the - // UTS 46 conformance tests suggests we should always check this. - if err == nil && p.verifyDNSLength && s == "" { - err = &labelError{s, "A4"} - } - labels := labelIter{orig: s} - for ; !labels.done(); labels.next() { - label := labels.label() - if label == "" { - // Empty labels are not okay. The label iterator skips the last - // label if it is empty. - if err == nil && p.verifyDNSLength { - err = &labelError{s, "A4"} - } - continue - } - if strings.HasPrefix(label, acePrefix) { - u, err2 := decode(label[len(acePrefix):]) - if err2 != nil { - if err == nil { - err = err2 - } - // Spec says keep the old label. - continue - } - labels.set(u) - if err == nil && p.validateLabels { - err = p.fromPuny(p, u) - } - if err == nil { - // This should be called on NonTransitional, according to the - // spec, but that currently does not have any effect. Use the - // original profile to preserve options. - err = p.validateLabel(u) - } - } else if err == nil { - err = p.validateLabel(label) - } - } - if toASCII { - for labels.reset(); !labels.done(); labels.next() { - label := labels.label() - if !ascii(label) { - a, err2 := encode(acePrefix, label) - if err == nil { - err = err2 - } - label = a - labels.set(a) - } - n := len(label) - if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { - err = &labelError{label, "A4"} - } - } - } - s = labels.result() - if toASCII && p.verifyDNSLength && err == nil { - // Compute the length of the domain name minus the root label and its dot. - n := len(s) - if n > 0 && s[n-1] == '.' 
{ - n-- - } - if len(s) < 1 || n > 253 { - err = &labelError{s, "A4"} - } - } - return s, err -} - -func normalize(p *Profile, s string) (string, error) { - return norm.NFC.String(s), nil -} - -func validateRegistration(p *Profile, s string) (string, error) { - if !norm.NFC.IsNormalString(s) { - return s, &labelError{s, "V1"} - } - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - // Copy bytes not copied so far. - switch p.simplify(info(v).category()) { - // TODO: handle the NV8 defined in the Unicode idna data set to allow - // for strict conformance to IDNA2008. - case valid, deviation: - case disallowed, mapped, unknown, ignored: - r, _ := utf8.DecodeRuneInString(s[i:]) - return s, runeError(r) - } - i += sz - } - return s, nil -} - -func validateAndMap(p *Profile, s string) (string, error) { - var ( - err error - b []byte - k int - ) - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - start := i - i += sz - // Copy bytes not copied so far. - switch p.simplify(info(v).category()) { - case valid: - continue - case disallowed: - if err == nil { - r, _ := utf8.DecodeRuneInString(s[start:]) - err = runeError(r) - } - continue - case mapped, deviation: - b = append(b, s[k:start]...) - b = info(v).appendMapping(b, s[start:i]) - case ignored: - b = append(b, s[k:start]...) - // drop the rune - case unknown: - b = append(b, s[k:start]...) - b = append(b, "\ufffd"...) - } - k = i - } - if k == 0 { - // No changes so far. - s = norm.NFC.String(s) - } else { - b = append(b, s[k:]...) - if norm.NFC.QuickSpan(b) != len(b) { - b = norm.NFC.Bytes(b) - } - // TODO: the punycode converters require strings as input. - s = string(b) - } - return s, err -} - -// A labelIter allows iterating over domain name labels. -type labelIter struct { - orig string - slice []string - curStart int - curEnd int - i int -} - -func (l *labelIter) reset() { - l.curStart = 0 - l.curEnd = 0 - l.i = 0 -} - -func (l *labelIter) done() bool { - return l.curStart >= len(l.orig) -} - -func (l *labelIter) result() string { - if l.slice != nil { - return strings.Join(l.slice, ".") - } - return l.orig -} - -func (l *labelIter) label() string { - if l.slice != nil { - return l.slice[l.i] - } - p := strings.IndexByte(l.orig[l.curStart:], '.') - l.curEnd = l.curStart + p - if p == -1 { - l.curEnd = len(l.orig) - } - return l.orig[l.curStart:l.curEnd] -} - -// next sets the value to the next label. It skips the last label if it is empty. -func (l *labelIter) next() { - l.i++ - if l.slice != nil { - if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { - l.curStart = len(l.orig) - } - } else { - l.curStart = l.curEnd + 1 - if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { - l.curStart = len(l.orig) - } - } -} - -func (l *labelIter) set(s string) { - if l.slice == nil { - l.slice = strings.Split(l.orig, ".") - } - l.slice[l.i] = s -} - -// acePrefix is the ASCII Compatible Encoding prefix. 
-const acePrefix = "xn--" - -func (p *Profile) simplify(cat category) category { - switch cat { - case disallowedSTD3Mapped: - if p.useSTD3Rules { - cat = disallowed - } else { - cat = mapped - } - case disallowedSTD3Valid: - if p.useSTD3Rules { - cat = disallowed - } else { - cat = valid - } - case deviation: - if !p.transitional { - cat = valid - } - case validNV8, validXV8: - // TODO: handle V2008 - cat = valid - } - return cat -} - -func validateFromPunycode(p *Profile, s string) error { - if !norm.NFC.IsNormalString(s) { - return &labelError{s, "V1"} - } - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - if c := p.simplify(info(v).category()); c != valid && c != deviation { - return &labelError{s, "V6"} - } - i += sz - } - return nil -} - -const ( - zwnj = "\u200c" - zwj = "\u200d" -) - -type joinState int8 - -const ( - stateStart joinState = iota - stateVirama - stateBefore - stateBeforeVirama - stateAfter - stateFAIL -) - -var joinStates = [][numJoinTypes]joinState{ - stateStart: { - joiningL: stateBefore, - joiningD: stateBefore, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateVirama, - }, - stateVirama: { - joiningL: stateBefore, - joiningD: stateBefore, - }, - stateBefore: { - joiningL: stateBefore, - joiningD: stateBefore, - joiningT: stateBefore, - joinZWNJ: stateAfter, - joinZWJ: stateFAIL, - joinVirama: stateBeforeVirama, - }, - stateBeforeVirama: { - joiningL: stateBefore, - joiningD: stateBefore, - joiningT: stateBefore, - }, - stateAfter: { - joiningL: stateFAIL, - joiningD: stateBefore, - joiningT: stateAfter, - joiningR: stateStart, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateAfter, // no-op as we can't accept joiners here - }, - stateFAIL: { - 0: stateFAIL, - joiningL: stateFAIL, - joiningD: stateFAIL, - joiningT: stateFAIL, - joiningR: stateFAIL, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateFAIL, - }, -} - -// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are -// already implicitly satisfied by the overall implementation. -func (p *Profile) validateLabel(s string) error { - if s == "" { - if p.verifyDNSLength { - return &labelError{s, "A4"} - } - return nil - } - if p.bidirule != nil && !p.bidirule(s) { - return &labelError{s, "B"} - } - if !p.validateLabels { - return nil - } - trie := p.trie // p.validateLabels is only set if trie is set. - if len(s) > 4 && s[2] == '-' && s[3] == '-' { - return &labelError{s, "V2"} - } - if s[0] == '-' || s[len(s)-1] == '-' { - return &labelError{s, "V3"} - } - // TODO: merge the use of this in the trie. - v, sz := trie.lookupString(s) - x := info(v) - if x.isModifier() { - return &labelError{s, "V5"} - } - // Quickly return in the absence of zero-width (non) joiners. 
- if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { - return nil - } - st := stateStart - for i := 0; ; { - jt := x.joinType() - if s[i:i+sz] == zwj { - jt = joinZWJ - } else if s[i:i+sz] == zwnj { - jt = joinZWNJ - } - st = joinStates[st][jt] - if x.isViramaModifier() { - st = joinStates[st][joinVirama] - } - if i += sz; i == len(s) { - break - } - v, sz = trie.lookupString(s[i:]) - x = info(v) - } - if st == stateFAIL || st == stateAfter { - return &labelError{s, "C"} - } - return nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go deleted file mode 100644 index 0b067cac..00000000 --- a/vendor/golang.org/x/net/idna/idna_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -import ( - "testing" -) - -var idnaTestCases = [...]struct { - ascii, unicode string -}{ - // Labels. - {"books", "books"}, - {"xn--bcher-kva", "bücher"}, - - // Domains. - {"foo--xn--bar.org", "foo--xn--bar.org"}, - {"golang.org", "golang.org"}, - {"example.xn--p1ai", "example.рф"}, - {"xn--czrw28b.tw", "商業.tw"}, - {"www.xn--mller-kva.de", "www.müller.de"}, -} - -func TestIDNA(t *testing.T) { - for _, tc := range idnaTestCases { - if a, err := ToASCII(tc.unicode); err != nil { - t.Errorf("ToASCII(%q): %v", tc.unicode, err) - } else if a != tc.ascii { - t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii) - } - - if u, err := ToUnicode(tc.ascii); err != nil { - t.Errorf("ToUnicode(%q): %v", tc.ascii, err) - } else if u != tc.unicode { - t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode) - } - } -} - -func TestIDNASeparators(t *testing.T) { - type subCase struct { - unicode string - wantASCII string - wantErr bool - } - - testCases := []struct { - name string - profile *Profile - subCases []subCase - }{ - { - name: "Punycode", profile: Punycode, - subCases: []subCase{ - {"example\u3002jp", "xn--examplejp-ck3h", false}, - {"東京\uFF0Ejp", "xn--jp-l92cn98g071o", false}, - {"大阪\uFF61jp", "xn--jp-ku9cz72u463f", false}, - }, - }, - { - name: "Lookup", profile: Lookup, - subCases: []subCase{ - {"example\u3002jp", "example.jp", false}, - {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, - {"大阪\uFF61jp", "xn--pssu33l.jp", false}, - }, - }, - { - name: "Display", profile: Display, - subCases: []subCase{ - {"example\u3002jp", "example.jp", false}, - {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, - {"大阪\uFF61jp", "xn--pssu33l.jp", false}, - }, - }, - { - name: "Registration", profile: Registration, - subCases: []subCase{ - {"example\u3002jp", "", true}, - {"東京\uFF0Ejp", "", true}, - {"大阪\uFF61jp", "", true}, - }, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - for _, c := range tc.subCases { - gotA, err := tc.profile.ToASCII(c.unicode) - if c.wantErr { - if err == nil { - t.Errorf("ToASCII(%q): got no error, but an error expected", c.unicode) - } - } else { - if err != nil { - t.Errorf("ToASCII(%q): got err=%v, but no error expected", c.unicode, err) - } else if gotA != c.wantASCII { - t.Errorf("ToASCII(%q): got %q, want %q", c.unicode, gotA, c.wantASCII) - } - } - } - }) - } -} - -// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode -// return errors.
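The idna example and test files removed above only exercised the vendored copy of golang.org/x/net/idna; assuming the dependency is still resolved through go.mod after this change, callers keep using the same profiles. The short sketch below is illustrative only, not part of the patch, and mirrors the deleted example_test.go:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Lookup profile: transitional mapping used for name resolution ("ß" becomes "ss").
	fmt.Println(idna.Lookup.ToASCII("www.faß.com")) // www.fass.com <nil>

	// Registration profile: stricter validation per RFC 5891 section 4;
	// '*' is rejected under the STD3 rules, so an error is returned.
	_, err := idna.Registration.ToASCII("*.faß.com")
	fmt.Println(err != nil) // true

	// Custom profile: map for lookup but relax the STD3 character rules
	// so the wildcard label survives, as in the deleted ExampleNew.
	p := idna.New(
		idna.MapForLookup(),
		idna.Transitional(true),
		idna.StrictDomainName(false),
	)
	fmt.Println(p.ToASCII("*.faß.com")) // *.fass.com <nil>
}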
diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go deleted file mode 100644 index 02c7d59a..00000000 --- a/vendor/golang.org/x/net/idna/punycode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// This file implements the Punycode algorithm from RFC 3492. - -import ( - "math" - "strings" - "unicode/utf8" -) - -// These parameter values are specified in section 5. -// -// All computation is done with int32s, so that overflow behavior is identical -// regardless of whether int is 32-bit or 64-bit. -const ( - base int32 = 36 - damp int32 = 700 - initialBias int32 = 72 - initialN int32 = 128 - skew int32 = 38 - tmax int32 = 26 - tmin int32 = 1 -) - -func punyError(s string) error { return &labelError{s, "A3"} } - -// decode decodes a string as specified in section 6.2. -func decode(encoded string) (string, error) { - if encoded == "" { - return "", nil - } - pos := 1 + strings.LastIndex(encoded, "-") - if pos == 1 { - return "", punyError(encoded) - } - if pos == len(encoded) { - return encoded[:len(encoded)-1], nil - } - output := make([]rune, 0, len(encoded)) - if pos != 0 { - for _, r := range encoded[:pos-1] { - output = append(output, r) - } - } - i, n, bias := int32(0), initialN, initialBias - for pos < len(encoded) { - oldI, w := i, int32(1) - for k := base; ; k += base { - if pos == len(encoded) { - return "", punyError(encoded) - } - digit, ok := decodeDigit(encoded[pos]) - if !ok { - return "", punyError(encoded) - } - pos++ - i += digit * w - if i < 0 { - return "", punyError(encoded) - } - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if digit < t { - break - } - w *= base - t - if w >= math.MaxInt32/base { - return "", punyError(encoded) - } - } - x := int32(len(output) + 1) - bias = adapt(i-oldI, x, oldI == 0) - n += i / x - i %= x - if n > utf8.MaxRune || len(output) >= 1024 { - return "", punyError(encoded) - } - output = append(output, 0) - copy(output[i+1:], output[i:]) - output[i] = n - i++ - } - return string(output), nil -} - -// encode encodes a string as specified in section 6.3 and prepends prefix to -// the result. -// -// The "while h < length(input)" line in the specification becomes "for -// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
-func encode(prefix, s string) (string, error) { - output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) - copy(output, prefix) - delta, n, bias := int32(0), initialN, initialBias - b, remaining := int32(0), int32(0) - for _, r := range s { - if r < 0x80 { - b++ - output = append(output, byte(r)) - } else { - remaining++ - } - } - h := b - if b > 0 { - output = append(output, '-') - } - for remaining != 0 { - m := int32(0x7fffffff) - for _, r := range s { - if m > r && r >= n { - m = r - } - } - delta += (m - n) * (h + 1) - if delta < 0 { - return "", punyError(s) - } - n = m - for _, r := range s { - if r < n { - delta++ - if delta < 0 { - return "", punyError(s) - } - continue - } - if r > n { - continue - } - q := delta - for k := base; ; k += base { - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if q < t { - break - } - output = append(output, encodeDigit(t+(q-t)%(base-t))) - q = (q - t) / (base - t) - } - output = append(output, encodeDigit(q)) - bias = adapt(delta, h+1, h == b) - delta = 0 - h++ - remaining-- - } - delta++ - n++ - } - return string(output), nil -} - -func decodeDigit(x byte) (digit int32, ok bool) { - switch { - case '0' <= x && x <= '9': - return int32(x - ('0' - 26)), true - case 'A' <= x && x <= 'Z': - return int32(x - 'A'), true - case 'a' <= x && x <= 'z': - return int32(x - 'a'), true - } - return 0, false -} - -func encodeDigit(digit int32) byte { - switch { - case 0 <= digit && digit < 26: - return byte(digit + 'a') - case 26 <= digit && digit < 36: - return byte(digit + ('0' - 26)) - } - panic("idna: internal error in punycode encoding") -} - -// adapt is the bias adaptation function specified in section 6.1. -func adapt(delta, numPoints int32, firstTime bool) int32 { - if firstTime { - delta /= damp - } else { - delta /= 2 - } - delta += delta / numPoints - k := int32(0) - for delta > ((base-tmin)*tmax)/2 { - delta /= base - tmin - k += base - } - return k + (base-tmin+1)*delta/(delta+skew) -} diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go deleted file mode 100644 index bfec81de..00000000 --- a/vendor/golang.org/x/net/idna/punycode_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -import ( - "strings" - "testing" -) - -var punycodeTestCases = [...]struct { - s, encoded string -}{ - {"", ""}, - {"-", "--"}, - {"-a", "-a-"}, - {"-a-", "-a--"}, - {"a", "a-"}, - {"a-", "a--"}, - {"a-b", "a-b-"}, - {"books", "books-"}, - {"bücher", "bcher-kva"}, - {"Hello世界", "Hello-ck1hg65u"}, - {"ü", "tda"}, - {"üý", "tdac"}, - - // The test cases below come from RFC 3492 section 7.1 with Errata 3026. - { - // (A) Arabic (Egyptian). - "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + - "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", - "egbpdaj6bu4bxfgehfvwxn", - }, - { - // (B) Chinese (simplified). - "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", - "ihqwcrb4cv8a8dqg056pqjye", - }, - { - // (C) Chinese (traditional). - "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", - "ihqwctvzc91f659drss3x8bo0yb", - }, - { - // (D) Czech. - "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + - "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + - "\u0065\u0073\u006B\u0079", - "Proprostnemluvesky-uyb24dma41a", - }, - { - // (E) Hebrew. 
- "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + - "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + - "\u05D1\u05E8\u05D9\u05EA", - "4dbcagdahymbxekheh6e0a7fei0b", - }, - { - // (F) Hindi (Devanagari). - "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + - "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + - "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + - "\u0939\u0948\u0902", - "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", - }, - { - // (G) Japanese (kanji and hiragana). - "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + - "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", - "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", - }, - { - // (H) Korean (Hangul syllables). - "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + - "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + - "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", - "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + - "psd879ccm6fea98c", - }, - { - // (I) Russian (Cyrillic). - "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + - "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + - "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + - "\u0438", - "b1abfaaepdrnnbgefbadotcwatmq2g4l", - }, - { - // (J) Spanish. - "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + - "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + - "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + - "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + - "\u0061\u00F1\u006F\u006C", - "PorqunopuedensimplementehablarenEspaol-fmd56a", - }, - { - // (K) Vietnamese. - "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + - "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + - "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + - "\u0056\u0069\u1EC7\u0074", - "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", - }, - { - // (L) 3B. - "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", - "3B-ww4c5e180e575a65lsy2b", - }, - { - // (M) -with-SUPER-MONKEYS. - "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + - "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + - "\u004F\u004E\u004B\u0045\u0059\u0053", - "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", - }, - { - // (N) Hello-Another-Way-. - "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + - "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + - "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", - "Hello-Another-Way--fc4qua05auwb3674vfr0b", - }, - { - // (O) 2. 
- "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", - "2-u9tlzr9756bt3uc0v", - }, - { - // (P) MajiKoi5 - "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + - "\u308B\u0035\u79D2\u524D", - "MajiKoi5-783gue6qz075azm5e", - }, - { - // (Q) de - "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", - "de-jg4avhby1noc0d", - }, - { - // (R) - "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", - "d9juau41awczczp", - }, - { - // (S) -> $1.00 <- - "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + - "\u003C\u002D", - "-> $1.00 <--", - }, -} - -func TestPunycode(t *testing.T) { - for _, tc := range punycodeTestCases { - if got, err := decode(tc.encoded); err != nil { - t.Errorf("decode(%q): %v", tc.encoded, err) - } else if got != tc.s { - t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s) - } - - if got, err := encode("", tc.s); err != nil { - t.Errorf(`encode("", %q): %v`, tc.s, err) - } else if got != tc.encoded { - t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) - } - } -} - -var punycodeErrorTestCases = [...]string{ - "decode -", // A sole '-' is invalid. - "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z]. - "decode foo#bar", // '#' is not in [0-9A-Za-z]. - "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z]. - "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated. - "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF. - "decode 9999999999a", // "9999999999a" overflows the int32 calculation. - - "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow. -} - -func TestPunycodeErrors(t *testing.T) { - for _, tc := range punycodeErrorTestCases { - var err error - switch { - case strings.HasPrefix(tc, "decode "): - _, err = decode(tc[7:]) - case strings.HasPrefix(tc, "encode "): - _, err = encode("", tc[7:]) - } - if err == nil { - if len(tc) > 256 { - tc = tc[:100] + "..." + tc[len(tc)-100:] - } - t.Errorf("no error for %s", tc) - } - } -} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go deleted file mode 100644 index d2819345..00000000 --- a/vendor/golang.org/x/net/idna/tables.go +++ /dev/null @@ -1,4477 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package idna - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
-const UnicodeVersion = "9.0.0" - -var mappings string = "" + // Size: 8176 bytes - "\x00\x01 \x03 ̈\x01a\x03 Ì„\x012\x013\x03 Ì\x03 ̧\x011\x01o\x051â„4\x051â„2" + - "\x053â„4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03â±¥\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + - "\x03 ̆\x03 ̇\x03 ÌŠ\x03 ̨\x03 ̃\x03 Ì‹\x01l\x01x\x04̈Ì\x03 ι\x01;\x05 ̈Ì" + - "\x04Õ¥Ö‚\x04اٴ\x04وٴ\x04Û‡Ù´\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + - "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + - "\x06à¹à¸²\x06à»àº²\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + - "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06à¾à¾µ\x02" + - "в\x02д\x02о\x02Ñ\x02Ñ‚\x02ÑŠ\x02Ñ£\x02æ\x01b\x01d\x01e\x02Ç\x01g\x01i\x01k" + - "\x01m\x01n\x02È£\x01p\x01t\x01u\x02É\x02É‘\x02É™\x02É›\x02Éœ\x02Å‹\x02É”\x02ɯ" + - "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02Ï\x02н\x02É’\x01c\x02É•\x02ð\x01f\x02ÉŸ" + - "\x02É¡\x02É¥\x02ɨ\x02É©\x02ɪ\x02Ê\x02É­\x02ÊŸ\x02ɱ\x02É°\x02ɲ\x02ɳ\x02É´\x02ɵ" + - "\x02ɸ\x02Ê‚\x02ʃ\x02Æ«\x02ʉ\x02ÊŠ\x02Ê‹\x02ÊŒ\x01z\x02Ê\x02Ê‘\x02Ê’\x02θ\x02ss" + - "\x02ά\x02έ\x02ή\x02ί\x02ÏŒ\x02Ï\x02ÏŽ\x05ἀι\x05á¼Î¹\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + - "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + - "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + - "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 Ì“Ì€\x05 Ì“Ì\x05 Ì“Í‚\x02Î\x05 ̔̀\x05 Ì”Ì\x05 ̔͂" + - "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + - "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + - "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02Ã¥\x02×\x02ב\x02×’" + - "\x02ד\x02Ï€\x051â„7\x051â„9\x061â„10\x051â„3\x052â„3\x051â„5\x052â„5\x053â„5\x054" + - "â„5\x051â„6\x055â„6\x051â„8\x053â„8\x055â„8\x057â„8\x041â„\x02ii\x02iv\x02vi" + - "\x04viii\x02ix\x02xi\x050â„3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + - "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + - "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + - "\x02==\x05â«Ì¸\x02É«\x02ɽ\x02È¿\x02É€\x01.\x04 ã‚™\x04 ã‚š\x06より\x06コト\x05(á„€)\x05" + - "(á„‚)\x05(ᄃ)\x05(á„…)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(á„‹)\x05(á„Œ)\x05(á„Ž)\x05(á„)\x05(á„" + - ")\x05(á„‘)\x05(á„’)\x05(ê°€)\x05(나)\x05(다)\x05(ë¼)\x05(마)\x05(ë°”)\x05(사)\x05(ì•„)" + - "\x05(ìž)\x05(ì°¨)\x05(ì¹´)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + - "\x05(二)\x05(三)\x05(å››)\x05(五)\x05(å…­)\x05(七)\x05(å…«)\x05(ä¹)\x05(å)\x05(月)" + - "\x05(ç«)\x05(æ°´)\x05(木)\x05(金)\x05(土)\x05(æ—¥)\x05(æ ª)\x05(有)\x05(社)\x05(å)" + - "\x05(特)\x05(財)\x05(ç¥)\x05(労)\x05(代)\x05(呼)\x05(å­¦)\x05(監)\x05(ä¼)\x05(資)" + - "\x05(å”)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + - "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주ì˜\x0236" + - "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + - "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + - "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + - "インãƒ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + - "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + - "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローãƒ\x09ケース\x09コルナ\x09コーãƒ\x0cサイクル\x0fサンãƒ" + - "ーム\x0cシリング\x09センãƒ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ãƒã‚¤ãƒ„" + - "\x0fパーセント\x09パーツ\x0cãƒãƒ¼ãƒ¬ãƒ«\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + - "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cãƒã‚¤" + - 
"ント\x09ボルト\x06ホン\x09ãƒãƒ³ãƒ‰\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッãƒ\x09マルク\x0fマ" + - "ンション\x0cミクロン\x06ミリ\x0fミリãƒãƒ¼ãƒ«\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + - "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + - "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + - "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + - "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06å¹³æˆ\x06昭和\x06大正\x06明治\x0cæ ª" + - "å¼ä¼šç¤¾\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + - "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + - "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + - "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + - "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + - "wb\x05v∕m\x05a∕m\x041æ—¥\x042æ—¥\x043æ—¥\x044æ—¥\x045æ—¥\x046æ—¥\x047æ—¥\x048æ—¥\x049æ—¥" + - "\x0510æ—¥\x0511æ—¥\x0512æ—¥\x0513æ—¥\x0514æ—¥\x0515æ—¥\x0516æ—¥\x0517æ—¥\x0518æ—¥\x0519æ—¥" + - "\x0520æ—¥\x0521æ—¥\x0522æ—¥\x0523æ—¥\x0524æ—¥\x0525æ—¥\x0526æ—¥\x0527æ—¥\x0528æ—¥\x0529æ—¥" + - "\x0530æ—¥\x0531æ—¥\x02ÑŒ\x02ɦ\x02ɬ\x02Êž\x02ʇ\x02Å“\x04𤋮\x04𢡊\x04𢡄\x04ð£•\x04𥉉" + - "\x04ð¥³\x04𧻓\x02ff\x02fi\x02fl\x02st\x04Õ´Õ¶\x04Õ´Õ¥\x04Õ´Õ«\x04Õ¾Õ¶\x04Õ´Õ­\x04×™Ö´" + - "\x04ײַ\x02×¢\x02×”\x02×›\x02ל\x02×\x02ר\x02ת\x04ש×\x04שׂ\x06שּ×\x06שּׂ\x04×" + - "Ö·\x04×Ö¸\x04×Ö¼\x04בּ\x04×’Ö¼\x04דּ\x04×”Ö¼\x04וּ\x04×–Ö¼\x04טּ\x04×™Ö¼\x04ךּ\x04" + - "×›Ö¼\x04לּ\x04מּ\x04× Ö¼\x04סּ\x04×£Ö¼\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + - "\x04וֹ\x04בֿ\x04×›Ö¿\x04פֿ\x04×ל\x02Ù±\x02Ù»\x02Ù¾\x02Ú€\x02Ùº\x02Ù¿\x02Ù¹\x02Ú¤" + - "\x02Ú¦\x02Ú„\x02Úƒ\x02Ú†\x02Ú‡\x02Ú\x02ÚŒ\x02ÚŽ\x02Úˆ\x02Ú˜\x02Ú‘\x02Ú©\x02Ú¯\x02Ú³" + - "\x02Ú±\x02Úº\x02Ú»\x02Û€\x02Û\x02Ú¾\x02Û’\x02Û“\x02Ú­\x02Û‡\x02Û†\x02Ûˆ\x02Û‹\x02Û…" + - "\x02Û‰\x02Û\x02Ù‰\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئÛ\x04ئى\x02ÛŒ\x04" + - "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + - "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + - "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + - "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04Ùج\x04ÙØ­\x04ÙØ®\x04ÙÙ…" + - "\x04ÙÙ‰\x04ÙÙŠ\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + - "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + - "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + - "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ÙÙ‘\x05" + - " ÙŽÙ‘\x05 ÙÙ‘\x05 ÙÙ‘\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + - "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + - "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + - "\x06Ù€ÙŽÙ‘\x06Ù€ÙÙ‘\x06Ù€ÙÙ‘\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + - "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + - "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + - "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + - "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + - "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + - "\x06Ùخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + - "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + - "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + - 
"\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + - "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + - "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06Ùمي\x06بحي\x06سخي" + - "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + - "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" + - "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + - "\x01%\x01@\x04ـً\x04Ù€ÙŽ\x04Ù€Ù\x04Ù€Ù\x04ـّ\x04ـْ\x02Ø¡\x02Ø¢\x02Ø£\x02ؤ\x02Ø¥" + - "\x02ئ\x02ا\x02ب\x02Ø©\x02ت\x02Ø«\x02ج\x02Ø­\x02Ø®\x02د\x02Ø°\x02ر\x02ز\x02س" + - "\x02Ø´\x02ص\x02ض\x02Ø·\x02ظ\x02ع\x02غ\x02Ù\x02Ù‚\x02Ùƒ\x02Ù„\x02Ù…\x02Ù†\x02Ù‡" + - "\x02Ùˆ\x02ÙŠ\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + - "\x02£\x02¬\x02¦\x02Â¥\x08ð…—ð…¥\x08ð…˜ð…¥\x0cð…˜ð…¥ð…®\x0cð…˜ð…¥ð…¯\x0cð…˜ð…¥ð…°\x0cð…˜ð…¥ð…±\x0cð…˜ð…¥ð…²\x08ð†¹" + - "ð…¥\x08ð†ºð…¥\x0cð†¹ð…¥ð…®\x0cð†ºð…¥ð…®\x0cð†¹ð…¥ð…¯\x0cð†ºð…¥ð…¯\x02ı\x02È·\x02α\x02ε\x02ζ\x02η\x02" + - "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02Ï„\x02Ï…\x02ψ\x03∇\x03∂\x02Ï\x02Ù®\x02Ú¡" + - "\x02Ù¯\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + - "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + - "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + - "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + - "c\x02mc\x02md\x02dj\x06ã»ã‹\x06ココ\x03サ\x03手\x03å­—\x03åŒ\x03デ\x03二\x03多\x03解" + - "\x03天\x03交\x03映\x03ç„¡\x03æ–™\x03å‰\x03後\x03å†\x03æ–°\x03åˆ\x03終\x03生\x03販\x03声" + - "\x03å¹\x03æ¼”\x03投\x03æ•\x03一\x03三\x03éŠ\x03å·¦\x03中\x03å³\x03指\x03èµ°\x03打\x03ç¦" + - "\x03空\x03åˆ\x03満\x03有\x03月\x03申\x03割\x03å–¶\x03é…\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + - "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔å‹ã€•\x09〔敗〕\x03å¾—\x03å¯\x03丽\x03丸\x03ä¹\x03ä½ \x03" + - "ä¾®\x03ä¾»\x03倂\x03åº\x03å‚™\x03僧\x03åƒ\x03ã’ž\x03å…\x03å…”\x03å…¤\x03å…·\x03ã’¹\x03å…§\x03" + - "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03ã“Ÿ\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + - "勤\x03勺\x03包\x03匆\x03北\x03å‰\x03å‘\x03åš\x03å³\x03å½\x03å¿\x03ç°\x03åŠ\x03åŸ\x03" + - "å«\x03å±\x03å†\x03å’ž\x03å¸\x03呈\x03周\x03å’¢\x03哶\x03å”\x03å•“\x03å•£\x03å–„\x03å–™\x03" + - "å–«\x03å–³\x03å—‚\x03圖\x03嘆\x03圗\x03噑\x03å™´\x03切\x03壮\x03城\x03埴\x03å \x03åž‹\x03" + - "å ²\x03å ±\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03ã›®\x03" + - "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03å°†\x03å°¢\x03ãž\x03å± \x03å±®\x03å³€\x03å²\x03" + - "嵃\x03åµ®\x03嵫\x03åµ¼\x03å·¡\x03å·¢\x03ã ¯\x03å·½\x03帨\x03帽\x03幩\x03ã¡¢\x03㡼\x03庰\x03" + - "庳\x03庶\x03廊\x03廾\x03èˆ\x03å¼¢\x03㣇\x03å½¢\x03彫\x03㣣\x03徚\x03å¿\x03å¿—\x03忹\x03" + - "æ‚\x03㤺\x03㤜\x03æ‚”\x03惇\x03æ…ˆ\x03æ…Œ\x03æ…Ž\x03æ…º\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + - "懲\x03懶\x03æˆ\x03戛\x03æ‰\x03抱\x03æ‹”\x03æ\x03挽\x03拼\x03æ¨\x03掃\x03æ¤\x03æ¢\x03" + - "æ…\x03掩\x03㨮\x03æ‘©\x03摾\x03æ’\x03æ‘·\x03㩬\x03æ•\x03敬\x03æ—£\x03書\x03晉\x03㬙\x03" + - "æš‘\x03㬈\x03㫤\x03冒\x03冕\x03最\x03æšœ\x03è‚­\x03ä™\x03朗\x03望\x03朡\x03æž\x03æ“\x03" + - "ã­‰\x03柺\x03æž…\x03æ¡’\x03梅\x03梎\x03æ Ÿ\x03椔\x03ã®\x03楂\x03榣\x03槪\x03檨\x03æ«›\x03" + - "ã°˜\x03次\x03æ­”\x03㱎\x03æ­²\x03殟\x03殺\x03æ®»\x03汎\x03沿\x03æ³\x03汧\x03æ´–\x03æ´¾\x03" + - "æµ·\x03æµ\x03浩\x03浸\x03涅\x03æ´´\x03港\x03æ¹®\x03ã´³\x03滋\x03滇\x03æ·¹\x03æ½®\x03濆\x03" + - "瀹\x03瀞\x03瀛\x03㶖\x03çŠ\x03ç½\x03ç·\x03ç‚­\x03ç……\x03熜\x03爨\x03爵\x03ç‰\x03犀\x03" + - "犕\x03çº\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03ç‘œ\x03瑱\x03ç’…\x03ç“Š\x03ã¼›\x03甤\x03甾\x03" + - "ç•°\x03ç˜\x03㿼\x03䀈\x03ç›´\x03眞\x03真\x03çŠ\x03䀹\x03çž‹\x03ä†\x03ä‚–\x03ç¡Ž\x03碌\x03" + - 
"磌\x03䃣\x03祖\x03ç¦\x03秫\x03䄯\x03ç©€\x03ç©Š\x03ç©\x03䈂\x03篆\x03築\x03䈧\x03ç³’\x03" + - "䊠\x03糨\x03ç³£\x03ç´€\x03çµ£\x03äŒ\x03ç·‡\x03縂\x03ç¹…\x03䌴\x03ä™\x03罺\x03羕\x03翺\x03" + - "者\x03è \x03è°\x03ä•\x03育\x03脃\x03ä‹\x03脾\x03媵\x03舄\x03辞\x03ä‘«\x03芑\x03芋\x03" + - "èŠ\x03劳\x03花\x03芳\x03芽\x03苦\x03è‹¥\x03èŒ\x03è£\x03莭\x03茣\x03莽\x03è§\x03è‘—\x03" + - "è“\x03èŠ\x03èŒ\x03èœ\x03䔫\x03蓱\x03蓳\x03è”–\x03蕤\x03ä•\x03ä•¡\x03ä•«\x03è™\x03虜\x03" + - "虧\x03虩\x03èš©\x03蚈\x03蜎\x03蛢\x03è¹\x03蜨\x03è«\x03螆\x03蟡\x03è \x03ä—¹\x03è¡ \x03" + - "è¡£\x03裗\x03裞\x03䘵\x03裺\x03ã’»\x03äš¾\x03䛇\x03誠\x03è«­\x03變\x03豕\x03貫\x03è³\x03" + - "è´›\x03èµ·\x03è·‹\x03趼\x03è·°\x03è»”\x03輸\x03é‚”\x03郱\x03é„‘\x03é„›\x03鈸\x03é‹—\x03鋘\x03" + - "鉼\x03é¹\x03é•\x03é–‹\x03䦕\x03é–·\x03䧦\x03雃\x03嶲\x03霣\x03ä©®\x03䩶\x03韠\x03䪲\x03" + - "é ‹\x03é ©\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03é±€\x03é³½\x03䳎\x03ä³­\x03" + - "鵧\x03䳸\x03麻\x03äµ–\x03黹\x03黾\x03é¼…\x03é¼\x03é¼–\x03é¼»" - -var xorData string = "" + // Size: 4855 bytes - "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + - "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + - "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + - "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + - "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + - "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + - "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + - "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + - "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + - "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + - "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + - "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + - "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + - "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + - "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + - "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + - "\x01\x0c#\x03Ê \x9d\x03Ê£\x9c\x03Ê¢\x9f\x03Ê¥\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + - "\x03Ê©\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + - "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + - "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + - "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + - "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + - "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + - "\x9c\x03Ø“\x89\x03ß”\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + - "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + - "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + - "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + - "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + - "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + - "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + - "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + - "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + - "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + - 
"\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + - "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + - "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + - "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + - "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + - "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + - "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + - "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + - "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + - ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + - "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + - "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + - "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + - "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + - "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + - "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + - "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + - "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + - "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + - "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + - "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + - "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + - "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + - "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + - "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + - "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + - "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + - "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + - "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + - "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + - "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + - "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + - "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + - "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + - "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + - "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + - "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + - "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + - "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + - "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + - "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + - "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + - "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + - "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + - "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + - 
"\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + - "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + - "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + - "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" + - "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + - "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + - "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + - "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + - "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + - "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + - "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + - "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + - "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + - "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + - "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + - ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + - "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + - "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + - "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + - "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + - "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + - "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + - "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + - "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + - "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + - "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + - "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + - "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + - "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + - "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + - "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + - "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + - "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + - "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + - "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + - "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + - "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + - "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + - "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + - "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + - "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + - "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + - "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + - "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + - "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + - 
"\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + - "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + - "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + - "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." + - "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + - "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + - "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + - "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + - "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + - "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + - "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + - "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + - "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + - "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + - "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + - "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + - "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + - "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + - "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + - "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + - "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + - "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + - "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + - "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + - "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + - "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + - "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + - "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + - "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + - "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + - "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + - "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + - "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + - "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + - "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + - "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + - "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + - "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + - "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + - "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + - "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + - "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + - "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + - "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + - "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + - 
"\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + - "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + - "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + - "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + - "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + - "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + - "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + - "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + - "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + - "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + - "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + - "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + - "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + - "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + - "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + - "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + - "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + - "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + - "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + - "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + - "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + - "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + - "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + - "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + - "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + - "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + - "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + - "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + - "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + - "\x04\x03\x0c?\x05\x03\x0c" + - "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + - "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + - "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + - "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + - "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + - "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + - "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + - "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + - "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + - "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + - "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + - "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + - "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + - "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + - "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ - "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + - "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + - "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + - "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + - "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + - "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + - "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + - "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + - "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + - "\x05\x22\x05\x03\x050\x1d" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
- case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// idnaTrie. Total size: 28496 bytes (27.83 KiB). Checksum: 43288b883596640e. -type idnaTrie struct{} - -func newIdnaTrie(i int) *idnaTrie { - return &idnaTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { - switch { - case n < 123: - return uint16(idnaValues[n<<6+uint32(b)]) - default: - n -= 123 - return uint16(idnaSparse.lookup(n, b)) - } -} - -// idnaValues: 125 blocks, 8000 entries, 16000 bytes -// The third block is the zero block. 
-var idnaValues = [8000]uint16{ - // Block 0x0, offset 0x0 - 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, - 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, - 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, - 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, - 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, - 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, - 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, - 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, - 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, - 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, - 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, - // Block 0x1, offset 0x40 - 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, - 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, - 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, - 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, - 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, - 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, - 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, - 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, - 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, - 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, - 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, - 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, - 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, - 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, - 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, - 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, - 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, - 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, - 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, - 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, - 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, - // Block 0x4, offset 0x100 - 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, - 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, - 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, - 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, - 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, - 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, - 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, - 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, - 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, - 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, - 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, - // Block 0x5, offset 0x140 - 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, - 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, - 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, - 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, - 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, - 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, - 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, - 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, - 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, - 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, - 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, - // Block 0x6, offset 0x180 - 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, - 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, - 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, - 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, - 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, - 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, - 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, - 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, - 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, - 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, - 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, - 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, - 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, - 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, - 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, - 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, - 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, - 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, - 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, - 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, - 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, - // Block 0x8, offset 0x200 - 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, - 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, - 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, - 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, - 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, - 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, - 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, - 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, - 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, - 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, - 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, - // Block 0x9, offset 0x240 - 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, - 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, - 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, - 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, - 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, - 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, - 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, - 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, - 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, - 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, - 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, - // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x1308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, - 0x286: 0x1308, 0x287: 0x1308, 0x288: 0x1308, 0x289: 0x1308, 0x28a: 0x1308, 0x28b: 0x1308, - 0x28c: 0x1308, 0x28d: 0x1308, 0x28e: 0x1308, 0x28f: 0x13c0, 0x290: 0x1308, 0x291: 0x1308, - 0x292: 0x1308, 0x293: 0x1308, 0x294: 0x1308, 0x295: 0x1308, 0x296: 0x1308, 0x297: 0x1308, - 0x298: 0x1308, 0x299: 0x1308, 0x29a: 0x1308, 0x29b: 0x1308, 0x29c: 0x1308, 0x29d: 0x1308, - 0x29e: 0x1308, 0x29f: 0x1308, 0x2a0: 0x1308, 0x2a1: 0x1308, 0x2a2: 0x1308, 0x2a3: 0x1308, - 0x2a4: 0x1308, 0x2a5: 0x1308, 0x2a6: 0x1308, 0x2a7: 0x1308, 0x2a8: 0x1308, 0x2a9: 0x1308, - 0x2aa: 0x1308, 0x2ab: 0x1308, 0x2ac: 0x1308, 0x2ad: 0x1308, 0x2ae: 0x1308, 0x2af: 0x1308, - 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, - 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, - 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, - 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, - 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, - 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, - 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, - 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, - 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, - 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, - 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, - 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, - 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, - // Block 0xc, offset 0x300 - 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, - 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, - 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, - 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, - 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, - 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, - 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, - 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, - 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, - 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, - 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, - // Block 0xd, offset 0x340 - 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, - 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, - 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, - 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, - 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, - 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, - 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, - 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, - 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, - 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, - 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, - // Block 0xe, offset 0x380 - 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x1308, 0x384: 0x1308, 0x385: 0x1308, - 0x386: 0x1308, 0x387: 0x1308, 0x388: 0x1318, 0x389: 0x1318, 0x38a: 0xe00d, 0x38b: 0x0008, - 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, - 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, - 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, - 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, - 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, - 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, - 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, - 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, - 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, - 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
- [generated lookup-table data elided: this hunk deletes the vendored, machine-generated Unicode trie blocks (roughly Block 0x10 through Block 0x57, entries of the form "0xNNNN: 0xNNNN,") whose line breaks were lost in extraction; the values carry no hand-written content and are omitted here]
0x1f61, 0x15d6: 0x1f71, 0x15d7: 0x1f81, - 0x15d8: 0x1f91, 0x15d9: 0x1fa1, 0x15da: 0xaeea, 0x15db: 0x03c2, 0x15dc: 0xafaa, 0x15dd: 0x1fc2, - 0x15de: 0xafba, 0x15df: 0xaf0a, 0x15e0: 0xaffa, 0x15e1: 0x0039, 0x15e2: 0x0ee9, 0x15e3: 0x1159, - 0x15e4: 0x0ef9, 0x15e5: 0x0f09, 0x15e6: 0x1199, 0x15e7: 0x0f31, 0x15e8: 0x0249, 0x15e9: 0x0f41, - 0x15ea: 0x0259, 0x15eb: 0x0f51, 0x15ec: 0x0359, 0x15ed: 0x0f61, 0x15ee: 0x0f71, 0x15ef: 0x00d9, - 0x15f0: 0x0f99, 0x15f1: 0x2039, 0x15f2: 0x0269, 0x15f3: 0x01d9, 0x15f4: 0x0fa9, 0x15f5: 0x0fb9, - 0x15f6: 0x1089, 0x15f7: 0x0279, 0x15f8: 0x0369, 0x15f9: 0x0289, 0x15fa: 0x13d1, 0x15fb: 0xaf4a, - 0x15fc: 0xafca, 0x15fd: 0xaf5a, 0x15fe: 0xb512, 0x15ff: 0xaf1a, - // Block 0x58, offset 0x1600 - 0x1600: 0x1caa, 0x1601: 0x0039, 0x1602: 0x0ee9, 0x1603: 0x1159, 0x1604: 0x0ef9, 0x1605: 0x0f09, - 0x1606: 0x1199, 0x1607: 0x0f31, 0x1608: 0x0249, 0x1609: 0x0f41, 0x160a: 0x0259, 0x160b: 0x0f51, - 0x160c: 0x0359, 0x160d: 0x0f61, 0x160e: 0x0f71, 0x160f: 0x00d9, 0x1610: 0x0f99, 0x1611: 0x2039, - 0x1612: 0x0269, 0x1613: 0x01d9, 0x1614: 0x0fa9, 0x1615: 0x0fb9, 0x1616: 0x1089, 0x1617: 0x0279, - 0x1618: 0x0369, 0x1619: 0x0289, 0x161a: 0x13d1, 0x161b: 0xaf2a, 0x161c: 0xb522, 0x161d: 0xaf3a, - 0x161e: 0xb532, 0x161f: 0x80d5, 0x1620: 0x80f5, 0x1621: 0x29d1, 0x1622: 0x8115, 0x1623: 0x8115, - 0x1624: 0x8135, 0x1625: 0x8155, 0x1626: 0x8175, 0x1627: 0x8195, 0x1628: 0x81b5, 0x1629: 0x81d5, - 0x162a: 0x81f5, 0x162b: 0x8215, 0x162c: 0x8235, 0x162d: 0x8255, 0x162e: 0x8275, 0x162f: 0x8295, - 0x1630: 0x82b5, 0x1631: 0x82d5, 0x1632: 0x82f5, 0x1633: 0x8315, 0x1634: 0x8335, 0x1635: 0x8355, - 0x1636: 0x8375, 0x1637: 0x8395, 0x1638: 0x83b5, 0x1639: 0x83d5, 0x163a: 0x83f5, 0x163b: 0x8415, - 0x163c: 0x81b5, 0x163d: 0x8435, 0x163e: 0x8455, 0x163f: 0x8215, - // Block 0x59, offset 0x1640 - 0x1640: 0x8475, 0x1641: 0x8495, 0x1642: 0x84b5, 0x1643: 0x84d5, 0x1644: 0x84f5, 0x1645: 0x8515, - 0x1646: 0x8535, 0x1647: 0x8555, 0x1648: 0x84d5, 0x1649: 0x8575, 0x164a: 0x84d5, 0x164b: 0x8595, - 0x164c: 0x8595, 0x164d: 0x85b5, 0x164e: 0x85b5, 0x164f: 0x85d5, 0x1650: 0x8515, 0x1651: 0x85f5, - 0x1652: 0x8615, 0x1653: 0x85f5, 0x1654: 0x8635, 0x1655: 0x8615, 0x1656: 0x8655, 0x1657: 0x8655, - 0x1658: 0x8675, 0x1659: 0x8675, 0x165a: 0x8695, 0x165b: 0x8695, 0x165c: 0x8615, 0x165d: 0x8115, - 0x165e: 0x86b5, 0x165f: 0x86d5, 0x1660: 0x0040, 0x1661: 0x86f5, 0x1662: 0x8715, 0x1663: 0x8735, - 0x1664: 0x8755, 0x1665: 0x8735, 0x1666: 0x8775, 0x1667: 0x8795, 0x1668: 0x87b5, 0x1669: 0x87b5, - 0x166a: 0x87d5, 0x166b: 0x87d5, 0x166c: 0x87f5, 0x166d: 0x87f5, 0x166e: 0x87d5, 0x166f: 0x87d5, - 0x1670: 0x8815, 0x1671: 0x8835, 0x1672: 0x8855, 0x1673: 0x8875, 0x1674: 0x8895, 0x1675: 0x88b5, - 0x1676: 0x88b5, 0x1677: 0x88b5, 0x1678: 0x88d5, 0x1679: 0x88d5, 0x167a: 0x88d5, 0x167b: 0x88d5, - 0x167c: 0x87b5, 0x167d: 0x87b5, 0x167e: 0x87b5, 0x167f: 0x0040, - // Block 0x5a, offset 0x1680 - 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x8715, 0x1683: 0x86f5, 0x1684: 0x88f5, 0x1685: 0x86f5, - 0x1686: 0x8715, 0x1687: 0x86f5, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x8915, 0x168b: 0x8715, - 0x168c: 0x8935, 0x168d: 0x88f5, 0x168e: 0x8935, 0x168f: 0x8715, 0x1690: 0x0040, 0x1691: 0x0040, - 0x1692: 0x8955, 0x1693: 0x8975, 0x1694: 0x8875, 0x1695: 0x8935, 0x1696: 0x88f5, 0x1697: 0x8935, - 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x8995, 0x169b: 0x89b5, 0x169c: 0x8995, 0x169d: 0x0040, - 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0xb541, 0x16a1: 0xb559, 0x16a2: 0xb571, 0x16a3: 0x89d6, - 0x16a4: 0xb589, 0x16a5: 0xb5a1, 0x16a6: 0x89f5, 0x16a7: 0x0040, 0x16a8: 0x8a15, 
0x16a9: 0x8a35, - 0x16aa: 0x8a55, 0x16ab: 0x8a35, 0x16ac: 0x8a75, 0x16ad: 0x8a95, 0x16ae: 0x8ab5, 0x16af: 0x0040, - 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, - 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, - 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x0208, 0x16c1: 0x0208, 0x16c2: 0x0208, 0x16c3: 0x0208, 0x16c4: 0x0208, 0x16c5: 0x0408, - 0x16c6: 0x0008, 0x16c7: 0x0408, 0x16c8: 0x0018, 0x16c9: 0x0408, 0x16ca: 0x0408, 0x16cb: 0x0008, - 0x16cc: 0x0008, 0x16cd: 0x0108, 0x16ce: 0x0408, 0x16cf: 0x0408, 0x16d0: 0x0408, 0x16d1: 0x0408, - 0x16d2: 0x0408, 0x16d3: 0x0208, 0x16d4: 0x0208, 0x16d5: 0x0208, 0x16d6: 0x0208, 0x16d7: 0x0108, - 0x16d8: 0x0208, 0x16d9: 0x0208, 0x16da: 0x0208, 0x16db: 0x0208, 0x16dc: 0x0208, 0x16dd: 0x0408, - 0x16de: 0x0208, 0x16df: 0x0208, 0x16e0: 0x0208, 0x16e1: 0x0408, 0x16e2: 0x0008, 0x16e3: 0x0008, - 0x16e4: 0x0408, 0x16e5: 0x1308, 0x16e6: 0x1308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, - 0x16ea: 0x0040, 0x16eb: 0x0218, 0x16ec: 0x0218, 0x16ed: 0x0218, 0x16ee: 0x0218, 0x16ef: 0x0418, - 0x16f0: 0x0018, 0x16f1: 0x0018, 0x16f2: 0x0018, 0x16f3: 0x0018, 0x16f4: 0x0018, 0x16f5: 0x0018, - 0x16f6: 0x0018, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, - 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, - // Block 0x5c, offset 0x1700 - 0x1700: 0x0208, 0x1701: 0x0408, 0x1702: 0x0208, 0x1703: 0x0408, 0x1704: 0x0408, 0x1705: 0x0408, - 0x1706: 0x0208, 0x1707: 0x0208, 0x1708: 0x0208, 0x1709: 0x0408, 0x170a: 0x0208, 0x170b: 0x0208, - 0x170c: 0x0408, 0x170d: 0x0208, 0x170e: 0x0408, 0x170f: 0x0408, 0x1710: 0x0208, 0x1711: 0x0408, - 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, - 0x1718: 0x0040, 0x1719: 0x0018, 0x171a: 0x0018, 0x171b: 0x0018, 0x171c: 0x0018, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, - 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0418, - 0x172a: 0x0418, 0x172b: 0x0418, 0x172c: 0x0418, 0x172d: 0x0218, 0x172e: 0x0218, 0x172f: 0x0018, - 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, - 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, - // Block 0x5d, offset 0x1740 - 0x1740: 0x1308, 0x1741: 0x1308, 0x1742: 0x1008, 0x1743: 0x1008, 0x1744: 0x0040, 0x1745: 0x0008, - 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, - 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, - 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, - 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, - 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, - 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, - 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, - 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, - 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 
0x1308, 0x177d: 0x0008, 0x177e: 0x1008, 0x177f: 0x1008, - // Block 0x5e, offset 0x1780 - 0x1780: 0x1308, 0x1781: 0x1008, 0x1782: 0x1008, 0x1783: 0x1008, 0x1784: 0x1008, 0x1785: 0x0040, - 0x1786: 0x0040, 0x1787: 0x1008, 0x1788: 0x1008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x1008, - 0x178c: 0x1008, 0x178d: 0x1808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x1008, - 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, - 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x1008, 0x17a3: 0x1008, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x1308, 0x17a7: 0x1308, 0x17a8: 0x1308, 0x17a9: 0x1308, - 0x17aa: 0x1308, 0x17ab: 0x1308, 0x17ac: 0x1308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, - 0x17b0: 0x1308, 0x17b1: 0x1308, 0x17b2: 0x1308, 0x17b3: 0x1308, 0x17b4: 0x1308, 0x17b5: 0x0040, - 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, - 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x0039, 0x17c1: 0x0ee9, 0x17c2: 0x1159, 0x17c3: 0x0ef9, 0x17c4: 0x0f09, 0x17c5: 0x1199, - 0x17c6: 0x0f31, 0x17c7: 0x0249, 0x17c8: 0x0f41, 0x17c9: 0x0259, 0x17ca: 0x0f51, 0x17cb: 0x0359, - 0x17cc: 0x0f61, 0x17cd: 0x0f71, 0x17ce: 0x00d9, 0x17cf: 0x0f99, 0x17d0: 0x2039, 0x17d1: 0x0269, - 0x17d2: 0x01d9, 0x17d3: 0x0fa9, 0x17d4: 0x0fb9, 0x17d5: 0x1089, 0x17d6: 0x0279, 0x17d7: 0x0369, - 0x17d8: 0x0289, 0x17d9: 0x13d1, 0x17da: 0x0039, 0x17db: 0x0ee9, 0x17dc: 0x1159, 0x17dd: 0x0ef9, - 0x17de: 0x0f09, 0x17df: 0x1199, 0x17e0: 0x0f31, 0x17e1: 0x0249, 0x17e2: 0x0f41, 0x17e3: 0x0259, - 0x17e4: 0x0f51, 0x17e5: 0x0359, 0x17e6: 0x0f61, 0x17e7: 0x0f71, 0x17e8: 0x00d9, 0x17e9: 0x0f99, - 0x17ea: 0x2039, 0x17eb: 0x0269, 0x17ec: 0x01d9, 0x17ed: 0x0fa9, 0x17ee: 0x0fb9, 0x17ef: 0x1089, - 0x17f0: 0x0279, 0x17f1: 0x0369, 0x17f2: 0x0289, 0x17f3: 0x13d1, 0x17f4: 0x0039, 0x17f5: 0x0ee9, - 0x17f6: 0x1159, 0x17f7: 0x0ef9, 0x17f8: 0x0f09, 0x17f9: 0x1199, 0x17fa: 0x0f31, 0x17fb: 0x0249, - 0x17fc: 0x0f41, 0x17fd: 0x0259, 0x17fe: 0x0f51, 0x17ff: 0x0359, - // Block 0x60, offset 0x1800 - 0x1800: 0x0f61, 0x1801: 0x0f71, 0x1802: 0x00d9, 0x1803: 0x0f99, 0x1804: 0x2039, 0x1805: 0x0269, - 0x1806: 0x01d9, 0x1807: 0x0fa9, 0x1808: 0x0fb9, 0x1809: 0x1089, 0x180a: 0x0279, 0x180b: 0x0369, - 0x180c: 0x0289, 0x180d: 0x13d1, 0x180e: 0x0039, 0x180f: 0x0ee9, 0x1810: 0x1159, 0x1811: 0x0ef9, - 0x1812: 0x0f09, 0x1813: 0x1199, 0x1814: 0x0f31, 0x1815: 0x0040, 0x1816: 0x0f41, 0x1817: 0x0259, - 0x1818: 0x0f51, 0x1819: 0x0359, 0x181a: 0x0f61, 0x181b: 0x0f71, 0x181c: 0x00d9, 0x181d: 0x0f99, - 0x181e: 0x2039, 0x181f: 0x0269, 0x1820: 0x01d9, 0x1821: 0x0fa9, 0x1822: 0x0fb9, 0x1823: 0x1089, - 0x1824: 0x0279, 0x1825: 0x0369, 0x1826: 0x0289, 0x1827: 0x13d1, 0x1828: 0x0039, 0x1829: 0x0ee9, - 0x182a: 0x1159, 0x182b: 0x0ef9, 0x182c: 0x0f09, 0x182d: 0x1199, 0x182e: 0x0f31, 0x182f: 0x0249, - 0x1830: 0x0f41, 0x1831: 0x0259, 0x1832: 0x0f51, 0x1833: 0x0359, 0x1834: 0x0f61, 0x1835: 0x0f71, - 0x1836: 0x00d9, 0x1837: 0x0f99, 0x1838: 0x2039, 0x1839: 0x0269, 0x183a: 0x01d9, 0x183b: 0x0fa9, - 0x183c: 0x0fb9, 0x183d: 0x1089, 0x183e: 0x0279, 0x183f: 0x0369, - // Block 0x61, offset 0x1840 - 0x1840: 0x0289, 0x1841: 0x13d1, 0x1842: 0x0039, 0x1843: 0x0ee9, 0x1844: 0x1159, 0x1845: 0x0ef9, - 0x1846: 0x0f09, 0x1847: 0x1199, 0x1848: 0x0f31, 0x1849: 0x0249, 0x184a: 0x0f41, 0x184b: 0x0259, - 0x184c: 0x0f51, 0x184d: 0x0359, 
0x184e: 0x0f61, 0x184f: 0x0f71, 0x1850: 0x00d9, 0x1851: 0x0f99, - 0x1852: 0x2039, 0x1853: 0x0269, 0x1854: 0x01d9, 0x1855: 0x0fa9, 0x1856: 0x0fb9, 0x1857: 0x1089, - 0x1858: 0x0279, 0x1859: 0x0369, 0x185a: 0x0289, 0x185b: 0x13d1, 0x185c: 0x0039, 0x185d: 0x0040, - 0x185e: 0x1159, 0x185f: 0x0ef9, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0f31, 0x1863: 0x0040, - 0x1864: 0x0040, 0x1865: 0x0259, 0x1866: 0x0f51, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0f71, - 0x186a: 0x00d9, 0x186b: 0x0f99, 0x186c: 0x2039, 0x186d: 0x0040, 0x186e: 0x01d9, 0x186f: 0x0fa9, - 0x1870: 0x0fb9, 0x1871: 0x1089, 0x1872: 0x0279, 0x1873: 0x0369, 0x1874: 0x0289, 0x1875: 0x13d1, - 0x1876: 0x0039, 0x1877: 0x0ee9, 0x1878: 0x1159, 0x1879: 0x0ef9, 0x187a: 0x0040, 0x187b: 0x1199, - 0x187c: 0x0040, 0x187d: 0x0249, 0x187e: 0x0f41, 0x187f: 0x0259, - // Block 0x62, offset 0x1880 - 0x1880: 0x0f51, 0x1881: 0x0359, 0x1882: 0x0f61, 0x1883: 0x0f71, 0x1884: 0x0040, 0x1885: 0x0f99, - 0x1886: 0x2039, 0x1887: 0x0269, 0x1888: 0x01d9, 0x1889: 0x0fa9, 0x188a: 0x0fb9, 0x188b: 0x1089, - 0x188c: 0x0279, 0x188d: 0x0369, 0x188e: 0x0289, 0x188f: 0x13d1, 0x1890: 0x0039, 0x1891: 0x0ee9, - 0x1892: 0x1159, 0x1893: 0x0ef9, 0x1894: 0x0f09, 0x1895: 0x1199, 0x1896: 0x0f31, 0x1897: 0x0249, - 0x1898: 0x0f41, 0x1899: 0x0259, 0x189a: 0x0f51, 0x189b: 0x0359, 0x189c: 0x0f61, 0x189d: 0x0f71, - 0x189e: 0x00d9, 0x189f: 0x0f99, 0x18a0: 0x2039, 0x18a1: 0x0269, 0x18a2: 0x01d9, 0x18a3: 0x0fa9, - 0x18a4: 0x0fb9, 0x18a5: 0x1089, 0x18a6: 0x0279, 0x18a7: 0x0369, 0x18a8: 0x0289, 0x18a9: 0x13d1, - 0x18aa: 0x0039, 0x18ab: 0x0ee9, 0x18ac: 0x1159, 0x18ad: 0x0ef9, 0x18ae: 0x0f09, 0x18af: 0x1199, - 0x18b0: 0x0f31, 0x18b1: 0x0249, 0x18b2: 0x0f41, 0x18b3: 0x0259, 0x18b4: 0x0f51, 0x18b5: 0x0359, - 0x18b6: 0x0f61, 0x18b7: 0x0f71, 0x18b8: 0x00d9, 0x18b9: 0x0f99, 0x18ba: 0x2039, 0x18bb: 0x0269, - 0x18bc: 0x01d9, 0x18bd: 0x0fa9, 0x18be: 0x0fb9, 0x18bf: 0x1089, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x0279, 0x18c1: 0x0369, 0x18c2: 0x0289, 0x18c3: 0x13d1, 0x18c4: 0x0039, 0x18c5: 0x0ee9, - 0x18c6: 0x0040, 0x18c7: 0x0ef9, 0x18c8: 0x0f09, 0x18c9: 0x1199, 0x18ca: 0x0f31, 0x18cb: 0x0040, - 0x18cc: 0x0040, 0x18cd: 0x0259, 0x18ce: 0x0f51, 0x18cf: 0x0359, 0x18d0: 0x0f61, 0x18d1: 0x0f71, - 0x18d2: 0x00d9, 0x18d3: 0x0f99, 0x18d4: 0x2039, 0x18d5: 0x0040, 0x18d6: 0x01d9, 0x18d7: 0x0fa9, - 0x18d8: 0x0fb9, 0x18d9: 0x1089, 0x18da: 0x0279, 0x18db: 0x0369, 0x18dc: 0x0289, 0x18dd: 0x0040, - 0x18de: 0x0039, 0x18df: 0x0ee9, 0x18e0: 0x1159, 0x18e1: 0x0ef9, 0x18e2: 0x0f09, 0x18e3: 0x1199, - 0x18e4: 0x0f31, 0x18e5: 0x0249, 0x18e6: 0x0f41, 0x18e7: 0x0259, 0x18e8: 0x0f51, 0x18e9: 0x0359, - 0x18ea: 0x0f61, 0x18eb: 0x0f71, 0x18ec: 0x00d9, 0x18ed: 0x0f99, 0x18ee: 0x2039, 0x18ef: 0x0269, - 0x18f0: 0x01d9, 0x18f1: 0x0fa9, 0x18f2: 0x0fb9, 0x18f3: 0x1089, 0x18f4: 0x0279, 0x18f5: 0x0369, - 0x18f6: 0x0289, 0x18f7: 0x13d1, 0x18f8: 0x0039, 0x18f9: 0x0ee9, 0x18fa: 0x0040, 0x18fb: 0x0ef9, - 0x18fc: 0x0f09, 0x18fd: 0x1199, 0x18fe: 0x0f31, 0x18ff: 0x0040, - // Block 0x64, offset 0x1900 - 0x1900: 0x0f41, 0x1901: 0x0259, 0x1902: 0x0f51, 0x1903: 0x0359, 0x1904: 0x0f61, 0x1905: 0x0040, - 0x1906: 0x00d9, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0040, 0x190a: 0x01d9, 0x190b: 0x0fa9, - 0x190c: 0x0fb9, 0x190d: 0x1089, 0x190e: 0x0279, 0x190f: 0x0369, 0x1910: 0x0289, 0x1911: 0x0040, - 0x1912: 0x0039, 0x1913: 0x0ee9, 0x1914: 0x1159, 0x1915: 0x0ef9, 0x1916: 0x0f09, 0x1917: 0x1199, - 0x1918: 0x0f31, 0x1919: 0x0249, 0x191a: 0x0f41, 0x191b: 0x0259, 0x191c: 0x0f51, 0x191d: 0x0359, - 0x191e: 0x0f61, 0x191f: 0x0f71, 0x1920: 0x00d9, 0x1921: 
0x0f99, 0x1922: 0x2039, 0x1923: 0x0269, - 0x1924: 0x01d9, 0x1925: 0x0fa9, 0x1926: 0x0fb9, 0x1927: 0x1089, 0x1928: 0x0279, 0x1929: 0x0369, - 0x192a: 0x0289, 0x192b: 0x13d1, 0x192c: 0x0039, 0x192d: 0x0ee9, 0x192e: 0x1159, 0x192f: 0x0ef9, - 0x1930: 0x0f09, 0x1931: 0x1199, 0x1932: 0x0f31, 0x1933: 0x0249, 0x1934: 0x0f41, 0x1935: 0x0259, - 0x1936: 0x0f51, 0x1937: 0x0359, 0x1938: 0x0f61, 0x1939: 0x0f71, 0x193a: 0x00d9, 0x193b: 0x0f99, - 0x193c: 0x2039, 0x193d: 0x0269, 0x193e: 0x01d9, 0x193f: 0x0fa9, - // Block 0x65, offset 0x1940 - 0x1940: 0x0fb9, 0x1941: 0x1089, 0x1942: 0x0279, 0x1943: 0x0369, 0x1944: 0x0289, 0x1945: 0x13d1, - 0x1946: 0x0039, 0x1947: 0x0ee9, 0x1948: 0x1159, 0x1949: 0x0ef9, 0x194a: 0x0f09, 0x194b: 0x1199, - 0x194c: 0x0f31, 0x194d: 0x0249, 0x194e: 0x0f41, 0x194f: 0x0259, 0x1950: 0x0f51, 0x1951: 0x0359, - 0x1952: 0x0f61, 0x1953: 0x0f71, 0x1954: 0x00d9, 0x1955: 0x0f99, 0x1956: 0x2039, 0x1957: 0x0269, - 0x1958: 0x01d9, 0x1959: 0x0fa9, 0x195a: 0x0fb9, 0x195b: 0x1089, 0x195c: 0x0279, 0x195d: 0x0369, - 0x195e: 0x0289, 0x195f: 0x13d1, 0x1960: 0x0039, 0x1961: 0x0ee9, 0x1962: 0x1159, 0x1963: 0x0ef9, - 0x1964: 0x0f09, 0x1965: 0x1199, 0x1966: 0x0f31, 0x1967: 0x0249, 0x1968: 0x0f41, 0x1969: 0x0259, - 0x196a: 0x0f51, 0x196b: 0x0359, 0x196c: 0x0f61, 0x196d: 0x0f71, 0x196e: 0x00d9, 0x196f: 0x0f99, - 0x1970: 0x2039, 0x1971: 0x0269, 0x1972: 0x01d9, 0x1973: 0x0fa9, 0x1974: 0x0fb9, 0x1975: 0x1089, - 0x1976: 0x0279, 0x1977: 0x0369, 0x1978: 0x0289, 0x1979: 0x13d1, 0x197a: 0x0039, 0x197b: 0x0ee9, - 0x197c: 0x1159, 0x197d: 0x0ef9, 0x197e: 0x0f09, 0x197f: 0x1199, - // Block 0x66, offset 0x1980 - 0x1980: 0x0f31, 0x1981: 0x0249, 0x1982: 0x0f41, 0x1983: 0x0259, 0x1984: 0x0f51, 0x1985: 0x0359, - 0x1986: 0x0f61, 0x1987: 0x0f71, 0x1988: 0x00d9, 0x1989: 0x0f99, 0x198a: 0x2039, 0x198b: 0x0269, - 0x198c: 0x01d9, 0x198d: 0x0fa9, 0x198e: 0x0fb9, 0x198f: 0x1089, 0x1990: 0x0279, 0x1991: 0x0369, - 0x1992: 0x0289, 0x1993: 0x13d1, 0x1994: 0x0039, 0x1995: 0x0ee9, 0x1996: 0x1159, 0x1997: 0x0ef9, - 0x1998: 0x0f09, 0x1999: 0x1199, 0x199a: 0x0f31, 0x199b: 0x0249, 0x199c: 0x0f41, 0x199d: 0x0259, - 0x199e: 0x0f51, 0x199f: 0x0359, 0x19a0: 0x0f61, 0x19a1: 0x0f71, 0x19a2: 0x00d9, 0x19a3: 0x0f99, - 0x19a4: 0x2039, 0x19a5: 0x0269, 0x19a6: 0x01d9, 0x19a7: 0x0fa9, 0x19a8: 0x0fb9, 0x19a9: 0x1089, - 0x19aa: 0x0279, 0x19ab: 0x0369, 0x19ac: 0x0289, 0x19ad: 0x13d1, 0x19ae: 0x0039, 0x19af: 0x0ee9, - 0x19b0: 0x1159, 0x19b1: 0x0ef9, 0x19b2: 0x0f09, 0x19b3: 0x1199, 0x19b4: 0x0f31, 0x19b5: 0x0249, - 0x19b6: 0x0f41, 0x19b7: 0x0259, 0x19b8: 0x0f51, 0x19b9: 0x0359, 0x19ba: 0x0f61, 0x19bb: 0x0f71, - 0x19bc: 0x00d9, 0x19bd: 0x0f99, 0x19be: 0x2039, 0x19bf: 0x0269, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x01d9, 0x19c1: 0x0fa9, 0x19c2: 0x0fb9, 0x19c3: 0x1089, 0x19c4: 0x0279, 0x19c5: 0x0369, - 0x19c6: 0x0289, 0x19c7: 0x13d1, 0x19c8: 0x0039, 0x19c9: 0x0ee9, 0x19ca: 0x1159, 0x19cb: 0x0ef9, - 0x19cc: 0x0f09, 0x19cd: 0x1199, 0x19ce: 0x0f31, 0x19cf: 0x0249, 0x19d0: 0x0f41, 0x19d1: 0x0259, - 0x19d2: 0x0f51, 0x19d3: 0x0359, 0x19d4: 0x0f61, 0x19d5: 0x0f71, 0x19d6: 0x00d9, 0x19d7: 0x0f99, - 0x19d8: 0x2039, 0x19d9: 0x0269, 0x19da: 0x01d9, 0x19db: 0x0fa9, 0x19dc: 0x0fb9, 0x19dd: 0x1089, - 0x19de: 0x0279, 0x19df: 0x0369, 0x19e0: 0x0289, 0x19e1: 0x13d1, 0x19e2: 0x0039, 0x19e3: 0x0ee9, - 0x19e4: 0x1159, 0x19e5: 0x0ef9, 0x19e6: 0x0f09, 0x19e7: 0x1199, 0x19e8: 0x0f31, 0x19e9: 0x0249, - 0x19ea: 0x0f41, 0x19eb: 0x0259, 0x19ec: 0x0f51, 0x19ed: 0x0359, 0x19ee: 0x0f61, 0x19ef: 0x0f71, - 0x19f0: 0x00d9, 0x19f1: 0x0f99, 0x19f2: 0x2039, 0x19f3: 0x0269, 0x19f4: 0x01d9, 
0x19f5: 0x0fa9, - 0x19f6: 0x0fb9, 0x19f7: 0x1089, 0x19f8: 0x0279, 0x19f9: 0x0369, 0x19fa: 0x0289, 0x19fb: 0x13d1, - 0x19fc: 0x0039, 0x19fd: 0x0ee9, 0x19fe: 0x1159, 0x19ff: 0x0ef9, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x0f09, 0x1a01: 0x1199, 0x1a02: 0x0f31, 0x1a03: 0x0249, 0x1a04: 0x0f41, 0x1a05: 0x0259, - 0x1a06: 0x0f51, 0x1a07: 0x0359, 0x1a08: 0x0f61, 0x1a09: 0x0f71, 0x1a0a: 0x00d9, 0x1a0b: 0x0f99, - 0x1a0c: 0x2039, 0x1a0d: 0x0269, 0x1a0e: 0x01d9, 0x1a0f: 0x0fa9, 0x1a10: 0x0fb9, 0x1a11: 0x1089, - 0x1a12: 0x0279, 0x1a13: 0x0369, 0x1a14: 0x0289, 0x1a15: 0x13d1, 0x1a16: 0x0039, 0x1a17: 0x0ee9, - 0x1a18: 0x1159, 0x1a19: 0x0ef9, 0x1a1a: 0x0f09, 0x1a1b: 0x1199, 0x1a1c: 0x0f31, 0x1a1d: 0x0249, - 0x1a1e: 0x0f41, 0x1a1f: 0x0259, 0x1a20: 0x0f51, 0x1a21: 0x0359, 0x1a22: 0x0f61, 0x1a23: 0x0f71, - 0x1a24: 0x00d9, 0x1a25: 0x0f99, 0x1a26: 0x2039, 0x1a27: 0x0269, 0x1a28: 0x01d9, 0x1a29: 0x0fa9, - 0x1a2a: 0x0fb9, 0x1a2b: 0x1089, 0x1a2c: 0x0279, 0x1a2d: 0x0369, 0x1a2e: 0x0289, 0x1a2f: 0x13d1, - 0x1a30: 0x0039, 0x1a31: 0x0ee9, 0x1a32: 0x1159, 0x1a33: 0x0ef9, 0x1a34: 0x0f09, 0x1a35: 0x1199, - 0x1a36: 0x0f31, 0x1a37: 0x0249, 0x1a38: 0x0f41, 0x1a39: 0x0259, 0x1a3a: 0x0f51, 0x1a3b: 0x0359, - 0x1a3c: 0x0f61, 0x1a3d: 0x0f71, 0x1a3e: 0x00d9, 0x1a3f: 0x0f99, - // Block 0x69, offset 0x1a40 - 0x1a40: 0x2039, 0x1a41: 0x0269, 0x1a42: 0x01d9, 0x1a43: 0x0fa9, 0x1a44: 0x0fb9, 0x1a45: 0x1089, - 0x1a46: 0x0279, 0x1a47: 0x0369, 0x1a48: 0x0289, 0x1a49: 0x13d1, 0x1a4a: 0x0039, 0x1a4b: 0x0ee9, - 0x1a4c: 0x1159, 0x1a4d: 0x0ef9, 0x1a4e: 0x0f09, 0x1a4f: 0x1199, 0x1a50: 0x0f31, 0x1a51: 0x0249, - 0x1a52: 0x0f41, 0x1a53: 0x0259, 0x1a54: 0x0f51, 0x1a55: 0x0359, 0x1a56: 0x0f61, 0x1a57: 0x0f71, - 0x1a58: 0x00d9, 0x1a59: 0x0f99, 0x1a5a: 0x2039, 0x1a5b: 0x0269, 0x1a5c: 0x01d9, 0x1a5d: 0x0fa9, - 0x1a5e: 0x0fb9, 0x1a5f: 0x1089, 0x1a60: 0x0279, 0x1a61: 0x0369, 0x1a62: 0x0289, 0x1a63: 0x13d1, - 0x1a64: 0xba81, 0x1a65: 0xba99, 0x1a66: 0x0040, 0x1a67: 0x0040, 0x1a68: 0xbab1, 0x1a69: 0x1099, - 0x1a6a: 0x10b1, 0x1a6b: 0x10c9, 0x1a6c: 0xbac9, 0x1a6d: 0xbae1, 0x1a6e: 0xbaf9, 0x1a6f: 0x1429, - 0x1a70: 0x1a31, 0x1a71: 0xbb11, 0x1a72: 0xbb29, 0x1a73: 0xbb41, 0x1a74: 0xbb59, 0x1a75: 0xbb71, - 0x1a76: 0xbb89, 0x1a77: 0x2109, 0x1a78: 0x1111, 0x1a79: 0x1429, 0x1a7a: 0xbba1, 0x1a7b: 0xbbb9, - 0x1a7c: 0xbbd1, 0x1a7d: 0x10e1, 0x1a7e: 0x10f9, 0x1a7f: 0xbbe9, - // Block 0x6a, offset 0x1a80 - 0x1a80: 0x2079, 0x1a81: 0xbc01, 0x1a82: 0xbab1, 0x1a83: 0x1099, 0x1a84: 0x10b1, 0x1a85: 0x10c9, - 0x1a86: 0xbac9, 0x1a87: 0xbae1, 0x1a88: 0xbaf9, 0x1a89: 0x1429, 0x1a8a: 0x1a31, 0x1a8b: 0xbb11, - 0x1a8c: 0xbb29, 0x1a8d: 0xbb41, 0x1a8e: 0xbb59, 0x1a8f: 0xbb71, 0x1a90: 0xbb89, 0x1a91: 0x2109, - 0x1a92: 0x1111, 0x1a93: 0xbba1, 0x1a94: 0xbba1, 0x1a95: 0xbbb9, 0x1a96: 0xbbd1, 0x1a97: 0x10e1, - 0x1a98: 0x10f9, 0x1a99: 0xbbe9, 0x1a9a: 0x2079, 0x1a9b: 0xbc21, 0x1a9c: 0xbac9, 0x1a9d: 0x1429, - 0x1a9e: 0xbb11, 0x1a9f: 0x10e1, 0x1aa0: 0x1111, 0x1aa1: 0x2109, 0x1aa2: 0xbab1, 0x1aa3: 0x1099, - 0x1aa4: 0x10b1, 0x1aa5: 0x10c9, 0x1aa6: 0xbac9, 0x1aa7: 0xbae1, 0x1aa8: 0xbaf9, 0x1aa9: 0x1429, - 0x1aaa: 0x1a31, 0x1aab: 0xbb11, 0x1aac: 0xbb29, 0x1aad: 0xbb41, 0x1aae: 0xbb59, 0x1aaf: 0xbb71, - 0x1ab0: 0xbb89, 0x1ab1: 0x2109, 0x1ab2: 0x1111, 0x1ab3: 0x1429, 0x1ab4: 0xbba1, 0x1ab5: 0xbbb9, - 0x1ab6: 0xbbd1, 0x1ab7: 0x10e1, 0x1ab8: 0x10f9, 0x1ab9: 0xbbe9, 0x1aba: 0x2079, 0x1abb: 0xbc01, - 0x1abc: 0xbab1, 0x1abd: 0x1099, 0x1abe: 0x10b1, 0x1abf: 0x10c9, - // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0xbac9, 0x1ac1: 0xbae1, 0x1ac2: 0xbaf9, 0x1ac3: 0x1429, 0x1ac4: 0x1a31, 0x1ac5: 0xbb11, - 0x1ac6: 
0xbb29, 0x1ac7: 0xbb41, 0x1ac8: 0xbb59, 0x1ac9: 0xbb71, 0x1aca: 0xbb89, 0x1acb: 0x2109, - 0x1acc: 0x1111, 0x1acd: 0xbba1, 0x1ace: 0xbba1, 0x1acf: 0xbbb9, 0x1ad0: 0xbbd1, 0x1ad1: 0x10e1, - 0x1ad2: 0x10f9, 0x1ad3: 0xbbe9, 0x1ad4: 0x2079, 0x1ad5: 0xbc21, 0x1ad6: 0xbac9, 0x1ad7: 0x1429, - 0x1ad8: 0xbb11, 0x1ad9: 0x10e1, 0x1ada: 0x1111, 0x1adb: 0x2109, 0x1adc: 0xbab1, 0x1add: 0x1099, - 0x1ade: 0x10b1, 0x1adf: 0x10c9, 0x1ae0: 0xbac9, 0x1ae1: 0xbae1, 0x1ae2: 0xbaf9, 0x1ae3: 0x1429, - 0x1ae4: 0x1a31, 0x1ae5: 0xbb11, 0x1ae6: 0xbb29, 0x1ae7: 0xbb41, 0x1ae8: 0xbb59, 0x1ae9: 0xbb71, - 0x1aea: 0xbb89, 0x1aeb: 0x2109, 0x1aec: 0x1111, 0x1aed: 0x1429, 0x1aee: 0xbba1, 0x1aef: 0xbbb9, - 0x1af0: 0xbbd1, 0x1af1: 0x10e1, 0x1af2: 0x10f9, 0x1af3: 0xbbe9, 0x1af4: 0x2079, 0x1af5: 0xbc01, - 0x1af6: 0xbab1, 0x1af7: 0x1099, 0x1af8: 0x10b1, 0x1af9: 0x10c9, 0x1afa: 0xbac9, 0x1afb: 0xbae1, - 0x1afc: 0xbaf9, 0x1afd: 0x1429, 0x1afe: 0x1a31, 0x1aff: 0xbb11, - // Block 0x6c, offset 0x1b00 - 0x1b00: 0xbb29, 0x1b01: 0xbb41, 0x1b02: 0xbb59, 0x1b03: 0xbb71, 0x1b04: 0xbb89, 0x1b05: 0x2109, - 0x1b06: 0x1111, 0x1b07: 0xbba1, 0x1b08: 0xbba1, 0x1b09: 0xbbb9, 0x1b0a: 0xbbd1, 0x1b0b: 0x10e1, - 0x1b0c: 0x10f9, 0x1b0d: 0xbbe9, 0x1b0e: 0x2079, 0x1b0f: 0xbc21, 0x1b10: 0xbac9, 0x1b11: 0x1429, - 0x1b12: 0xbb11, 0x1b13: 0x10e1, 0x1b14: 0x1111, 0x1b15: 0x2109, 0x1b16: 0xbab1, 0x1b17: 0x1099, - 0x1b18: 0x10b1, 0x1b19: 0x10c9, 0x1b1a: 0xbac9, 0x1b1b: 0xbae1, 0x1b1c: 0xbaf9, 0x1b1d: 0x1429, - 0x1b1e: 0x1a31, 0x1b1f: 0xbb11, 0x1b20: 0xbb29, 0x1b21: 0xbb41, 0x1b22: 0xbb59, 0x1b23: 0xbb71, - 0x1b24: 0xbb89, 0x1b25: 0x2109, 0x1b26: 0x1111, 0x1b27: 0x1429, 0x1b28: 0xbba1, 0x1b29: 0xbbb9, - 0x1b2a: 0xbbd1, 0x1b2b: 0x10e1, 0x1b2c: 0x10f9, 0x1b2d: 0xbbe9, 0x1b2e: 0x2079, 0x1b2f: 0xbc01, - 0x1b30: 0xbab1, 0x1b31: 0x1099, 0x1b32: 0x10b1, 0x1b33: 0x10c9, 0x1b34: 0xbac9, 0x1b35: 0xbae1, - 0x1b36: 0xbaf9, 0x1b37: 0x1429, 0x1b38: 0x1a31, 0x1b39: 0xbb11, 0x1b3a: 0xbb29, 0x1b3b: 0xbb41, - 0x1b3c: 0xbb59, 0x1b3d: 0xbb71, 0x1b3e: 0xbb89, 0x1b3f: 0x2109, - // Block 0x6d, offset 0x1b40 - 0x1b40: 0x1111, 0x1b41: 0xbba1, 0x1b42: 0xbba1, 0x1b43: 0xbbb9, 0x1b44: 0xbbd1, 0x1b45: 0x10e1, - 0x1b46: 0x10f9, 0x1b47: 0xbbe9, 0x1b48: 0x2079, 0x1b49: 0xbc21, 0x1b4a: 0xbac9, 0x1b4b: 0x1429, - 0x1b4c: 0xbb11, 0x1b4d: 0x10e1, 0x1b4e: 0x1111, 0x1b4f: 0x2109, 0x1b50: 0xbab1, 0x1b51: 0x1099, - 0x1b52: 0x10b1, 0x1b53: 0x10c9, 0x1b54: 0xbac9, 0x1b55: 0xbae1, 0x1b56: 0xbaf9, 0x1b57: 0x1429, - 0x1b58: 0x1a31, 0x1b59: 0xbb11, 0x1b5a: 0xbb29, 0x1b5b: 0xbb41, 0x1b5c: 0xbb59, 0x1b5d: 0xbb71, - 0x1b5e: 0xbb89, 0x1b5f: 0x2109, 0x1b60: 0x1111, 0x1b61: 0x1429, 0x1b62: 0xbba1, 0x1b63: 0xbbb9, - 0x1b64: 0xbbd1, 0x1b65: 0x10e1, 0x1b66: 0x10f9, 0x1b67: 0xbbe9, 0x1b68: 0x2079, 0x1b69: 0xbc01, - 0x1b6a: 0xbab1, 0x1b6b: 0x1099, 0x1b6c: 0x10b1, 0x1b6d: 0x10c9, 0x1b6e: 0xbac9, 0x1b6f: 0xbae1, - 0x1b70: 0xbaf9, 0x1b71: 0x1429, 0x1b72: 0x1a31, 0x1b73: 0xbb11, 0x1b74: 0xbb29, 0x1b75: 0xbb41, - 0x1b76: 0xbb59, 0x1b77: 0xbb71, 0x1b78: 0xbb89, 0x1b79: 0x2109, 0x1b7a: 0x1111, 0x1b7b: 0xbba1, - 0x1b7c: 0xbba1, 0x1b7d: 0xbbb9, 0x1b7e: 0xbbd1, 0x1b7f: 0x10e1, - // Block 0x6e, offset 0x1b80 - 0x1b80: 0x10f9, 0x1b81: 0xbbe9, 0x1b82: 0x2079, 0x1b83: 0xbc21, 0x1b84: 0xbac9, 0x1b85: 0x1429, - 0x1b86: 0xbb11, 0x1b87: 0x10e1, 0x1b88: 0x1111, 0x1b89: 0x2109, 0x1b8a: 0xbc41, 0x1b8b: 0xbc41, - 0x1b8c: 0x0040, 0x1b8d: 0x0040, 0x1b8e: 0x1f41, 0x1b8f: 0x00c9, 0x1b90: 0x0069, 0x1b91: 0x0079, - 0x1b92: 0x1f51, 0x1b93: 0x1f61, 0x1b94: 0x1f71, 0x1b95: 0x1f81, 0x1b96: 0x1f91, 0x1b97: 0x1fa1, - 0x1b98: 0x1f41, 0x1b99: 0x00c9, 
0x1b9a: 0x0069, 0x1b9b: 0x0079, 0x1b9c: 0x1f51, 0x1b9d: 0x1f61, - 0x1b9e: 0x1f71, 0x1b9f: 0x1f81, 0x1ba0: 0x1f91, 0x1ba1: 0x1fa1, 0x1ba2: 0x1f41, 0x1ba3: 0x00c9, - 0x1ba4: 0x0069, 0x1ba5: 0x0079, 0x1ba6: 0x1f51, 0x1ba7: 0x1f61, 0x1ba8: 0x1f71, 0x1ba9: 0x1f81, - 0x1baa: 0x1f91, 0x1bab: 0x1fa1, 0x1bac: 0x1f41, 0x1bad: 0x00c9, 0x1bae: 0x0069, 0x1baf: 0x0079, - 0x1bb0: 0x1f51, 0x1bb1: 0x1f61, 0x1bb2: 0x1f71, 0x1bb3: 0x1f81, 0x1bb4: 0x1f91, 0x1bb5: 0x1fa1, - 0x1bb6: 0x1f41, 0x1bb7: 0x00c9, 0x1bb8: 0x0069, 0x1bb9: 0x0079, 0x1bba: 0x1f51, 0x1bbb: 0x1f61, - 0x1bbc: 0x1f71, 0x1bbd: 0x1f81, 0x1bbe: 0x1f91, 0x1bbf: 0x1fa1, - // Block 0x6f, offset 0x1bc0 - 0x1bc0: 0xe115, 0x1bc1: 0xe115, 0x1bc2: 0xe135, 0x1bc3: 0xe135, 0x1bc4: 0xe115, 0x1bc5: 0xe115, - 0x1bc6: 0xe175, 0x1bc7: 0xe175, 0x1bc8: 0xe115, 0x1bc9: 0xe115, 0x1bca: 0xe135, 0x1bcb: 0xe135, - 0x1bcc: 0xe115, 0x1bcd: 0xe115, 0x1bce: 0xe1f5, 0x1bcf: 0xe1f5, 0x1bd0: 0xe115, 0x1bd1: 0xe115, - 0x1bd2: 0xe135, 0x1bd3: 0xe135, 0x1bd4: 0xe115, 0x1bd5: 0xe115, 0x1bd6: 0xe175, 0x1bd7: 0xe175, - 0x1bd8: 0xe115, 0x1bd9: 0xe115, 0x1bda: 0xe135, 0x1bdb: 0xe135, 0x1bdc: 0xe115, 0x1bdd: 0xe115, - 0x1bde: 0x8b05, 0x1bdf: 0x8b05, 0x1be0: 0x04b5, 0x1be1: 0x04b5, 0x1be2: 0x0208, 0x1be3: 0x0208, - 0x1be4: 0x0208, 0x1be5: 0x0208, 0x1be6: 0x0208, 0x1be7: 0x0208, 0x1be8: 0x0208, 0x1be9: 0x0208, - 0x1bea: 0x0208, 0x1beb: 0x0208, 0x1bec: 0x0208, 0x1bed: 0x0208, 0x1bee: 0x0208, 0x1bef: 0x0208, - 0x1bf0: 0x0208, 0x1bf1: 0x0208, 0x1bf2: 0x0208, 0x1bf3: 0x0208, 0x1bf4: 0x0208, 0x1bf5: 0x0208, - 0x1bf6: 0x0208, 0x1bf7: 0x0208, 0x1bf8: 0x0208, 0x1bf9: 0x0208, 0x1bfa: 0x0208, 0x1bfb: 0x0208, - 0x1bfc: 0x0208, 0x1bfd: 0x0208, 0x1bfe: 0x0208, 0x1bff: 0x0208, - // Block 0x70, offset 0x1c00 - 0x1c00: 0xb189, 0x1c01: 0xb1a1, 0x1c02: 0xb201, 0x1c03: 0xb249, 0x1c04: 0x0040, 0x1c05: 0xb411, - 0x1c06: 0xb291, 0x1c07: 0xb219, 0x1c08: 0xb309, 0x1c09: 0xb429, 0x1c0a: 0xb399, 0x1c0b: 0xb3b1, - 0x1c0c: 0xb3c9, 0x1c0d: 0xb3e1, 0x1c0e: 0xb2a9, 0x1c0f: 0xb339, 0x1c10: 0xb369, 0x1c11: 0xb2d9, - 0x1c12: 0xb381, 0x1c13: 0xb279, 0x1c14: 0xb2c1, 0x1c15: 0xb1d1, 0x1c16: 0xb1e9, 0x1c17: 0xb231, - 0x1c18: 0xb261, 0x1c19: 0xb2f1, 0x1c1a: 0xb321, 0x1c1b: 0xb351, 0x1c1c: 0xbc59, 0x1c1d: 0x7949, - 0x1c1e: 0xbc71, 0x1c1f: 0xbc89, 0x1c20: 0x0040, 0x1c21: 0xb1a1, 0x1c22: 0xb201, 0x1c23: 0x0040, - 0x1c24: 0xb3f9, 0x1c25: 0x0040, 0x1c26: 0x0040, 0x1c27: 0xb219, 0x1c28: 0x0040, 0x1c29: 0xb429, - 0x1c2a: 0xb399, 0x1c2b: 0xb3b1, 0x1c2c: 0xb3c9, 0x1c2d: 0xb3e1, 0x1c2e: 0xb2a9, 0x1c2f: 0xb339, - 0x1c30: 0xb369, 0x1c31: 0xb2d9, 0x1c32: 0xb381, 0x1c33: 0x0040, 0x1c34: 0xb2c1, 0x1c35: 0xb1d1, - 0x1c36: 0xb1e9, 0x1c37: 0xb231, 0x1c38: 0x0040, 0x1c39: 0xb2f1, 0x1c3a: 0x0040, 0x1c3b: 0xb351, - 0x1c3c: 0x0040, 0x1c3d: 0x0040, 0x1c3e: 0x0040, 0x1c3f: 0x0040, - // Block 0x71, offset 0x1c40 - 0x1c40: 0x0040, 0x1c41: 0x0040, 0x1c42: 0xb201, 0x1c43: 0x0040, 0x1c44: 0x0040, 0x1c45: 0x0040, - 0x1c46: 0x0040, 0x1c47: 0xb219, 0x1c48: 0x0040, 0x1c49: 0xb429, 0x1c4a: 0x0040, 0x1c4b: 0xb3b1, - 0x1c4c: 0x0040, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0x0040, 0x1c51: 0xb2d9, - 0x1c52: 0xb381, 0x1c53: 0x0040, 0x1c54: 0xb2c1, 0x1c55: 0x0040, 0x1c56: 0x0040, 0x1c57: 0xb231, - 0x1c58: 0x0040, 0x1c59: 0xb2f1, 0x1c5a: 0x0040, 0x1c5b: 0xb351, 0x1c5c: 0x0040, 0x1c5d: 0x7949, - 0x1c5e: 0x0040, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, - 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0xb309, 0x1c69: 0xb429, - 0x1c6a: 0xb399, 0x1c6b: 0x0040, 0x1c6c: 0xb3c9, 0x1c6d: 
0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, - 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, - 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0xb321, 0x1c7b: 0xb351, - 0x1c7c: 0xbc59, 0x1c7d: 0x0040, 0x1c7e: 0xbc71, 0x1c7f: 0x0040, - // Block 0x72, offset 0x1c80 - 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0xb3f9, 0x1c85: 0xb411, - 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, - 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, - 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, - 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x0040, - 0x1c9e: 0x0040, 0x1c9f: 0x0040, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0xb249, - 0x1ca4: 0x0040, 0x1ca5: 0xb411, 0x1ca6: 0xb291, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, - 0x1caa: 0x0040, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, - 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0xb279, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, - 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0xb261, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, - 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, - // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0040, 0x1cc1: 0xbca2, 0x1cc2: 0xbcba, 0x1cc3: 0xbcd2, 0x1cc4: 0xbcea, 0x1cc5: 0xbd02, - 0x1cc6: 0xbd1a, 0x1cc7: 0xbd32, 0x1cc8: 0xbd4a, 0x1cc9: 0xbd62, 0x1cca: 0xbd7a, 0x1ccb: 0x0018, - 0x1ccc: 0x0018, 0x1ccd: 0x0040, 0x1cce: 0x0040, 0x1ccf: 0x0040, 0x1cd0: 0xbd92, 0x1cd1: 0xbdb2, - 0x1cd2: 0xbdd2, 0x1cd3: 0xbdf2, 0x1cd4: 0xbe12, 0x1cd5: 0xbe32, 0x1cd6: 0xbe52, 0x1cd7: 0xbe72, - 0x1cd8: 0xbe92, 0x1cd9: 0xbeb2, 0x1cda: 0xbed2, 0x1cdb: 0xbef2, 0x1cdc: 0xbf12, 0x1cdd: 0xbf32, - 0x1cde: 0xbf52, 0x1cdf: 0xbf72, 0x1ce0: 0xbf92, 0x1ce1: 0xbfb2, 0x1ce2: 0xbfd2, 0x1ce3: 0xbff2, - 0x1ce4: 0xc012, 0x1ce5: 0xc032, 0x1ce6: 0xc052, 0x1ce7: 0xc072, 0x1ce8: 0xc092, 0x1ce9: 0xc0b2, - 0x1cea: 0xc0d1, 0x1ceb: 0x1159, 0x1cec: 0x0269, 0x1ced: 0x6671, 0x1cee: 0xc111, 0x1cef: 0x0040, - 0x1cf0: 0x0039, 0x1cf1: 0x0ee9, 0x1cf2: 0x1159, 0x1cf3: 0x0ef9, 0x1cf4: 0x0f09, 0x1cf5: 0x1199, - 0x1cf6: 0x0f31, 0x1cf7: 0x0249, 0x1cf8: 0x0f41, 0x1cf9: 0x0259, 0x1cfa: 0x0f51, 0x1cfb: 0x0359, - 0x1cfc: 0x0f61, 0x1cfd: 0x0f71, 0x1cfe: 0x00d9, 0x1cff: 0x0f99, - // Block 0x74, offset 0x1d00 - 0x1d00: 0x2039, 0x1d01: 0x0269, 0x1d02: 0x01d9, 0x1d03: 0x0fa9, 0x1d04: 0x0fb9, 0x1d05: 0x1089, - 0x1d06: 0x0279, 0x1d07: 0x0369, 0x1d08: 0x0289, 0x1d09: 0x13d1, 0x1d0a: 0xc129, 0x1d0b: 0x65b1, - 0x1d0c: 0xc141, 0x1d0d: 0x1441, 0x1d0e: 0xc159, 0x1d0f: 0xc179, 0x1d10: 0x0018, 0x1d11: 0x0018, - 0x1d12: 0x0018, 0x1d13: 0x0018, 0x1d14: 0x0018, 0x1d15: 0x0018, 0x1d16: 0x0018, 0x1d17: 0x0018, - 0x1d18: 0x0018, 0x1d19: 0x0018, 0x1d1a: 0x0018, 0x1d1b: 0x0018, 0x1d1c: 0x0018, 0x1d1d: 0x0018, - 0x1d1e: 0x0018, 0x1d1f: 0x0018, 0x1d20: 0x0018, 0x1d21: 0x0018, 0x1d22: 0x0018, 0x1d23: 0x0018, - 0x1d24: 0x0018, 0x1d25: 0x0018, 0x1d26: 0x0018, 0x1d27: 0x0018, 0x1d28: 0x0018, 0x1d29: 0x0018, - 0x1d2a: 0xc191, 0x1d2b: 0xc1a9, 0x1d2c: 0x0040, 0x1d2d: 0x0040, 0x1d2e: 0x0040, 0x1d2f: 0x0040, - 0x1d30: 0x0018, 0x1d31: 0x0018, 0x1d32: 0x0018, 0x1d33: 0x0018, 0x1d34: 0x0018, 0x1d35: 0x0018, - 0x1d36: 0x0018, 0x1d37: 0x0018, 0x1d38: 0x0018, 0x1d39: 0x0018, 0x1d3a: 0x0018, 0x1d3b: 0x0018, - 0x1d3c: 0x0018, 0x1d3d: 0x0018, 0x1d3e: 0x0018, 0x1d3f: 0x0018, - // Block 0x75, 
offset 0x1d40 - 0x1d40: 0xc1d9, 0x1d41: 0xc211, 0x1d42: 0xc249, 0x1d43: 0x0040, 0x1d44: 0x0040, 0x1d45: 0x0040, - 0x1d46: 0x0040, 0x1d47: 0x0040, 0x1d48: 0x0040, 0x1d49: 0x0040, 0x1d4a: 0x0040, 0x1d4b: 0x0040, - 0x1d4c: 0x0040, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xc269, 0x1d51: 0xc289, - 0x1d52: 0xc2a9, 0x1d53: 0xc2c9, 0x1d54: 0xc2e9, 0x1d55: 0xc309, 0x1d56: 0xc329, 0x1d57: 0xc349, - 0x1d58: 0xc369, 0x1d59: 0xc389, 0x1d5a: 0xc3a9, 0x1d5b: 0xc3c9, 0x1d5c: 0xc3e9, 0x1d5d: 0xc409, - 0x1d5e: 0xc429, 0x1d5f: 0xc449, 0x1d60: 0xc469, 0x1d61: 0xc489, 0x1d62: 0xc4a9, 0x1d63: 0xc4c9, - 0x1d64: 0xc4e9, 0x1d65: 0xc509, 0x1d66: 0xc529, 0x1d67: 0xc549, 0x1d68: 0xc569, 0x1d69: 0xc589, - 0x1d6a: 0xc5a9, 0x1d6b: 0xc5c9, 0x1d6c: 0xc5e9, 0x1d6d: 0xc609, 0x1d6e: 0xc629, 0x1d6f: 0xc649, - 0x1d70: 0xc669, 0x1d71: 0xc689, 0x1d72: 0xc6a9, 0x1d73: 0xc6c9, 0x1d74: 0xc6e9, 0x1d75: 0xc709, - 0x1d76: 0xc729, 0x1d77: 0xc749, 0x1d78: 0xc769, 0x1d79: 0xc789, 0x1d7a: 0xc7a9, 0x1d7b: 0xc7c9, - 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, - // Block 0x76, offset 0x1d80 - 0x1d80: 0xcaf9, 0x1d81: 0xcb19, 0x1d82: 0xcb39, 0x1d83: 0x8b1d, 0x1d84: 0xcb59, 0x1d85: 0xcb79, - 0x1d86: 0xcb99, 0x1d87: 0xcbb9, 0x1d88: 0xcbd9, 0x1d89: 0xcbf9, 0x1d8a: 0xcc19, 0x1d8b: 0xcc39, - 0x1d8c: 0xcc59, 0x1d8d: 0x8b3d, 0x1d8e: 0xcc79, 0x1d8f: 0xcc99, 0x1d90: 0xccb9, 0x1d91: 0xccd9, - 0x1d92: 0x8b5d, 0x1d93: 0xccf9, 0x1d94: 0xcd19, 0x1d95: 0xc429, 0x1d96: 0x8b7d, 0x1d97: 0xcd39, - 0x1d98: 0xcd59, 0x1d99: 0xcd79, 0x1d9a: 0xcd99, 0x1d9b: 0xcdb9, 0x1d9c: 0x8b9d, 0x1d9d: 0xcdd9, - 0x1d9e: 0xcdf9, 0x1d9f: 0xce19, 0x1da0: 0xce39, 0x1da1: 0xce59, 0x1da2: 0xc789, 0x1da3: 0xce79, - 0x1da4: 0xce99, 0x1da5: 0xceb9, 0x1da6: 0xced9, 0x1da7: 0xcef9, 0x1da8: 0xcf19, 0x1da9: 0xcf39, - 0x1daa: 0xcf59, 0x1dab: 0xcf79, 0x1dac: 0xcf99, 0x1dad: 0xcfb9, 0x1dae: 0xcfd9, 0x1daf: 0xcff9, - 0x1db0: 0xd019, 0x1db1: 0xd039, 0x1db2: 0xd039, 0x1db3: 0xd039, 0x1db4: 0x8bbd, 0x1db5: 0xd059, - 0x1db6: 0xd079, 0x1db7: 0xd099, 0x1db8: 0x8bdd, 0x1db9: 0xd0b9, 0x1dba: 0xd0d9, 0x1dbb: 0xd0f9, - 0x1dbc: 0xd119, 0x1dbd: 0xd139, 0x1dbe: 0xd159, 0x1dbf: 0xd179, - // Block 0x77, offset 0x1dc0 - 0x1dc0: 0xd199, 0x1dc1: 0xd1b9, 0x1dc2: 0xd1d9, 0x1dc3: 0xd1f9, 0x1dc4: 0xd219, 0x1dc5: 0xd239, - 0x1dc6: 0xd239, 0x1dc7: 0xd259, 0x1dc8: 0xd279, 0x1dc9: 0xd299, 0x1dca: 0xd2b9, 0x1dcb: 0xd2d9, - 0x1dcc: 0xd2f9, 0x1dcd: 0xd319, 0x1dce: 0xd339, 0x1dcf: 0xd359, 0x1dd0: 0xd379, 0x1dd1: 0xd399, - 0x1dd2: 0xd3b9, 0x1dd3: 0xd3d9, 0x1dd4: 0xd3f9, 0x1dd5: 0xd419, 0x1dd6: 0xd439, 0x1dd7: 0xd459, - 0x1dd8: 0xd479, 0x1dd9: 0x8bfd, 0x1dda: 0xd499, 0x1ddb: 0xd4b9, 0x1ddc: 0xd4d9, 0x1ddd: 0xc309, - 0x1dde: 0xd4f9, 0x1ddf: 0xd519, 0x1de0: 0x8c1d, 0x1de1: 0x8c3d, 0x1de2: 0xd539, 0x1de3: 0xd559, - 0x1de4: 0xd579, 0x1de5: 0xd599, 0x1de6: 0xd5b9, 0x1de7: 0xd5d9, 0x1de8: 0x0040, 0x1de9: 0xd5f9, - 0x1dea: 0xd619, 0x1deb: 0xd619, 0x1dec: 0x8c5d, 0x1ded: 0xd639, 0x1dee: 0xd659, 0x1def: 0xd679, - 0x1df0: 0xd699, 0x1df1: 0x8c7d, 0x1df2: 0xd6b9, 0x1df3: 0xd6d9, 0x1df4: 0x0040, 0x1df5: 0xd6f9, - 0x1df6: 0xd719, 0x1df7: 0xd739, 0x1df8: 0xd759, 0x1df9: 0xd779, 0x1dfa: 0xd799, 0x1dfb: 0x8c9d, - 0x1dfc: 0xd7b9, 0x1dfd: 0x8cbd, 0x1dfe: 0xd7d9, 0x1dff: 0xd7f9, - // Block 0x78, offset 0x1e00 - 0x1e00: 0xd819, 0x1e01: 0xd839, 0x1e02: 0xd859, 0x1e03: 0xd879, 0x1e04: 0xd899, 0x1e05: 0xd8b9, - 0x1e06: 0xd8d9, 0x1e07: 0xd8f9, 0x1e08: 0xd919, 0x1e09: 0x8cdd, 0x1e0a: 0xd939, 0x1e0b: 0xd959, - 0x1e0c: 0xd979, 0x1e0d: 0xd999, 0x1e0e: 0xd9b9, 0x1e0f: 0x8cfd, 0x1e10: 0xd9d9, 0x1e11: 0x8d1d, - 0x1e12: 
0x8d3d, 0x1e13: 0xd9f9, 0x1e14: 0xda19, 0x1e15: 0xda19, 0x1e16: 0xda39, 0x1e17: 0x8d5d, - 0x1e18: 0x8d7d, 0x1e19: 0xda59, 0x1e1a: 0xda79, 0x1e1b: 0xda99, 0x1e1c: 0xdab9, 0x1e1d: 0xdad9, - 0x1e1e: 0xdaf9, 0x1e1f: 0xdb19, 0x1e20: 0xdb39, 0x1e21: 0xdb59, 0x1e22: 0xdb79, 0x1e23: 0xdb99, - 0x1e24: 0x8d9d, 0x1e25: 0xdbb9, 0x1e26: 0xdbd9, 0x1e27: 0xdbf9, 0x1e28: 0xdc19, 0x1e29: 0xdbf9, - 0x1e2a: 0xdc39, 0x1e2b: 0xdc59, 0x1e2c: 0xdc79, 0x1e2d: 0xdc99, 0x1e2e: 0xdcb9, 0x1e2f: 0xdcd9, - 0x1e30: 0xdcf9, 0x1e31: 0xdd19, 0x1e32: 0xdd39, 0x1e33: 0xdd59, 0x1e34: 0xdd79, 0x1e35: 0xdd99, - 0x1e36: 0xddb9, 0x1e37: 0xddd9, 0x1e38: 0x8dbd, 0x1e39: 0xddf9, 0x1e3a: 0xde19, 0x1e3b: 0xde39, - 0x1e3c: 0xde59, 0x1e3d: 0xde79, 0x1e3e: 0x8ddd, 0x1e3f: 0xde99, - // Block 0x79, offset 0x1e40 - 0x1e40: 0xe599, 0x1e41: 0xe5b9, 0x1e42: 0xe5d9, 0x1e43: 0xe5f9, 0x1e44: 0xe619, 0x1e45: 0xe639, - 0x1e46: 0x8efd, 0x1e47: 0xe659, 0x1e48: 0xe679, 0x1e49: 0xe699, 0x1e4a: 0xe6b9, 0x1e4b: 0xe6d9, - 0x1e4c: 0xe6f9, 0x1e4d: 0x8f1d, 0x1e4e: 0xe719, 0x1e4f: 0xe739, 0x1e50: 0x8f3d, 0x1e51: 0x8f5d, - 0x1e52: 0xe759, 0x1e53: 0xe779, 0x1e54: 0xe799, 0x1e55: 0xe7b9, 0x1e56: 0xe7d9, 0x1e57: 0xe7f9, - 0x1e58: 0xe819, 0x1e59: 0xe839, 0x1e5a: 0xe859, 0x1e5b: 0x8f7d, 0x1e5c: 0xe879, 0x1e5d: 0x8f9d, - 0x1e5e: 0xe899, 0x1e5f: 0x0040, 0x1e60: 0xe8b9, 0x1e61: 0xe8d9, 0x1e62: 0xe8f9, 0x1e63: 0x8fbd, - 0x1e64: 0xe919, 0x1e65: 0xe939, 0x1e66: 0x8fdd, 0x1e67: 0x8ffd, 0x1e68: 0xe959, 0x1e69: 0xe979, - 0x1e6a: 0xe999, 0x1e6b: 0xe9b9, 0x1e6c: 0xe9d9, 0x1e6d: 0xe9d9, 0x1e6e: 0xe9f9, 0x1e6f: 0xea19, - 0x1e70: 0xea39, 0x1e71: 0xea59, 0x1e72: 0xea79, 0x1e73: 0xea99, 0x1e74: 0xeab9, 0x1e75: 0x901d, - 0x1e76: 0xead9, 0x1e77: 0x903d, 0x1e78: 0xeaf9, 0x1e79: 0x905d, 0x1e7a: 0xeb19, 0x1e7b: 0x907d, - 0x1e7c: 0x909d, 0x1e7d: 0x90bd, 0x1e7e: 0xeb39, 0x1e7f: 0xeb59, - // Block 0x7a, offset 0x1e80 - 0x1e80: 0xeb79, 0x1e81: 0x90dd, 0x1e82: 0x90fd, 0x1e83: 0x911d, 0x1e84: 0x913d, 0x1e85: 0xeb99, - 0x1e86: 0xebb9, 0x1e87: 0xebb9, 0x1e88: 0xebd9, 0x1e89: 0xebf9, 0x1e8a: 0xec19, 0x1e8b: 0xec39, - 0x1e8c: 0xec59, 0x1e8d: 0x915d, 0x1e8e: 0xec79, 0x1e8f: 0xec99, 0x1e90: 0xecb9, 0x1e91: 0xecd9, - 0x1e92: 0x917d, 0x1e93: 0xecf9, 0x1e94: 0x919d, 0x1e95: 0x91bd, 0x1e96: 0xed19, 0x1e97: 0xed39, - 0x1e98: 0xed59, 0x1e99: 0xed79, 0x1e9a: 0xed99, 0x1e9b: 0xedb9, 0x1e9c: 0x91dd, 0x1e9d: 0x91fd, - 0x1e9e: 0x921d, 0x1e9f: 0x0040, 0x1ea0: 0xedd9, 0x1ea1: 0x923d, 0x1ea2: 0xedf9, 0x1ea3: 0xee19, - 0x1ea4: 0xee39, 0x1ea5: 0x925d, 0x1ea6: 0xee59, 0x1ea7: 0xee79, 0x1ea8: 0xee99, 0x1ea9: 0xeeb9, - 0x1eaa: 0xeed9, 0x1eab: 0x927d, 0x1eac: 0xeef9, 0x1ead: 0xef19, 0x1eae: 0xef39, 0x1eaf: 0xef59, - 0x1eb0: 0xef79, 0x1eb1: 0xef99, 0x1eb2: 0x929d, 0x1eb3: 0x92bd, 0x1eb4: 0xefb9, 0x1eb5: 0x92dd, - 0x1eb6: 0xefd9, 0x1eb7: 0x92fd, 0x1eb8: 0xeff9, 0x1eb9: 0xf019, 0x1eba: 0xf039, 0x1ebb: 0x931d, - 0x1ebc: 0x933d, 0x1ebd: 0xf059, 0x1ebe: 0x935d, 0x1ebf: 0xf079, - // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xf6b9, 0x1ec1: 0xf6d9, 0x1ec2: 0xf6f9, 0x1ec3: 0xf719, 0x1ec4: 0xf739, 0x1ec5: 0x951d, - 0x1ec6: 0xf759, 0x1ec7: 0xf779, 0x1ec8: 0xf799, 0x1ec9: 0xf7b9, 0x1eca: 0xf7d9, 0x1ecb: 0x953d, - 0x1ecc: 0x955d, 0x1ecd: 0xf7f9, 0x1ece: 0xf819, 0x1ecf: 0xf839, 0x1ed0: 0xf859, 0x1ed1: 0xf879, - 0x1ed2: 0xf899, 0x1ed3: 0x957d, 0x1ed4: 0xf8b9, 0x1ed5: 0xf8d9, 0x1ed6: 0xf8f9, 0x1ed7: 0xf919, - 0x1ed8: 0x959d, 0x1ed9: 0x95bd, 0x1eda: 0xf939, 0x1edb: 0xf959, 0x1edc: 0xf979, 0x1edd: 0x95dd, - 0x1ede: 0xf999, 0x1edf: 0xf9b9, 0x1ee0: 0x6815, 0x1ee1: 0x95fd, 0x1ee2: 0xf9d9, 0x1ee3: 0xf9f9, - 0x1ee4: 0xfa19, 0x1ee5: 0x961d, 
0x1ee6: 0xfa39, 0x1ee7: 0xfa59, 0x1ee8: 0xfa79, 0x1ee9: 0xfa99, - 0x1eea: 0xfab9, 0x1eeb: 0xfad9, 0x1eec: 0xfaf9, 0x1eed: 0x963d, 0x1eee: 0xfb19, 0x1eef: 0xfb39, - 0x1ef0: 0xfb59, 0x1ef1: 0x965d, 0x1ef2: 0xfb79, 0x1ef3: 0xfb99, 0x1ef4: 0xfbb9, 0x1ef5: 0xfbd9, - 0x1ef6: 0x7b35, 0x1ef7: 0x967d, 0x1ef8: 0xfbf9, 0x1ef9: 0xfc19, 0x1efa: 0xfc39, 0x1efb: 0x969d, - 0x1efc: 0xfc59, 0x1efd: 0x96bd, 0x1efe: 0xfc79, 0x1eff: 0xfc79, - // Block 0x7c, offset 0x1f00 - 0x1f00: 0xfc99, 0x1f01: 0x96dd, 0x1f02: 0xfcb9, 0x1f03: 0xfcd9, 0x1f04: 0xfcf9, 0x1f05: 0xfd19, - 0x1f06: 0xfd39, 0x1f07: 0xfd59, 0x1f08: 0xfd79, 0x1f09: 0x96fd, 0x1f0a: 0xfd99, 0x1f0b: 0xfdb9, - 0x1f0c: 0xfdd9, 0x1f0d: 0xfdf9, 0x1f0e: 0xfe19, 0x1f0f: 0xfe39, 0x1f10: 0x971d, 0x1f11: 0xfe59, - 0x1f12: 0x973d, 0x1f13: 0x975d, 0x1f14: 0x977d, 0x1f15: 0xfe79, 0x1f16: 0xfe99, 0x1f17: 0xfeb9, - 0x1f18: 0xfed9, 0x1f19: 0xfef9, 0x1f1a: 0xff19, 0x1f1b: 0xff39, 0x1f1c: 0xff59, 0x1f1d: 0x979d, - 0x1f1e: 0x0040, 0x1f1f: 0x0040, 0x1f20: 0x0040, 0x1f21: 0x0040, 0x1f22: 0x0040, 0x1f23: 0x0040, - 0x1f24: 0x0040, 0x1f25: 0x0040, 0x1f26: 0x0040, 0x1f27: 0x0040, 0x1f28: 0x0040, 0x1f29: 0x0040, - 0x1f2a: 0x0040, 0x1f2b: 0x0040, 0x1f2c: 0x0040, 0x1f2d: 0x0040, 0x1f2e: 0x0040, 0x1f2f: 0x0040, - 0x1f30: 0x0040, 0x1f31: 0x0040, 0x1f32: 0x0040, 0x1f33: 0x0040, 0x1f34: 0x0040, 0x1f35: 0x0040, - 0x1f36: 0x0040, 0x1f37: 0x0040, 0x1f38: 0x0040, 0x1f39: 0x0040, 0x1f3a: 0x0040, 0x1f3b: 0x0040, - 0x1f3c: 0x0040, 0x1f3d: 0x0040, 0x1f3e: 0x0040, 0x1f3f: 0x0040, -} - -// idnaIndex: 35 blocks, 2240 entries, 4480 bytes -// Block 0 is the zero block. -var idnaIndex = [2240]uint16{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x7b, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, - 0xc8: 0x06, 0xc9: 0x7c, 0xca: 0x7d, 0xcb: 0x07, 0xcc: 0x7e, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, - 0xd0: 0x7f, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x80, 0xd6: 0x81, 0xd7: 0x82, - 0xd8: 0x0f, 0xd9: 0x83, 0xda: 0x84, 0xdb: 0x10, 0xdc: 0x11, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, - 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, - 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, - 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, - // Block 0x4, offset 0x100 - 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x12, 0x126: 0x13, 0x127: 0x14, - 0x128: 0x15, 0x129: 0x16, 0x12a: 0x17, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x8d, - 0x130: 0x8e, 0x131: 0x1c, 0x132: 0x1d, 0x133: 0x1e, 0x134: 0x8f, 0x135: 0x1f, 0x136: 0x90, 0x137: 0x91, - 0x138: 0x92, 0x139: 0x93, 0x13a: 0x20, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x21, 0x13e: 0x22, 0x13f: 0x96, - // Block 0x5, offset 0x140 - 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9b, 0x147: 0x9b, - 0x148: 0x9d, 0x149: 0x9e, 0x14a: 0x9f, 0x14b: 0xa0, 0x14c: 0xa1, 0x14d: 0xa2, 0x14e: 0xa3, 0x14f: 0xa4, - 0x150: 0xa5, 0x151: 0x9d, 0x152: 0x9d, 0x153: 0x9d, 0x154: 0x9d, 0x155: 0x9d, 0x156: 0x9d, 0x157: 0x9d, - 0x158: 0x9d, 0x159: 0xa6, 0x15a: 0xa7, 0x15b: 0xa8, 0x15c: 0xa9, 0x15d: 0xaa, 0x15e: 0xab, 0x15f: 0xac, - 0x160: 0xad, 0x161: 0xae, 0x162: 0xaf, 0x163: 0xb0, 0x164: 0xb1, 0x165: 0xb2, 0x166: 0xb3, 0x167: 0xb4, - 0x168: 0xb5, 0x169: 0xb6, 0x16a: 0xb7, 0x16b: 0xb8, 0x16c: 0xb9, 0x16d: 0xba, 0x16e: 0xbb, 0x16f: 0xbc, - 0x170: 0xbd, 0x171: 0xbe, 0x172: 0xbf, 0x173: 0xc0, 0x174: 0x23, 0x175: 0x24, 
0x176: 0x25, 0x177: 0xc1, - 0x178: 0x26, 0x179: 0x26, 0x17a: 0x27, 0x17b: 0x26, 0x17c: 0xc2, 0x17d: 0x28, 0x17e: 0x29, 0x17f: 0x2a, - // Block 0x6, offset 0x180 - 0x180: 0x2b, 0x181: 0x2c, 0x182: 0x2d, 0x183: 0xc3, 0x184: 0x2e, 0x185: 0x2f, 0x186: 0xc4, 0x187: 0x9b, - 0x188: 0xc5, 0x189: 0xc6, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc7, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xc8, - 0x190: 0xc9, 0x191: 0x30, 0x192: 0x31, 0x193: 0x32, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, - 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, - 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, - 0x1a8: 0xca, 0x1a9: 0xcb, 0x1aa: 0x9b, 0x1ab: 0xcc, 0x1ac: 0x9b, 0x1ad: 0xcd, 0x1ae: 0xce, 0x1af: 0xcf, - 0x1b0: 0xd0, 0x1b1: 0x33, 0x1b2: 0x26, 0x1b3: 0x34, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, - 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x35, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x36, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x37, 0x1c6: 0x38, 0x1c7: 0xe0, - 0x1c8: 0xe1, 0x1c9: 0x39, 0x1ca: 0x3a, 0x1cb: 0x3b, 0x1cc: 0x3c, 0x1cd: 0x3d, 0x1ce: 0x3e, 0x1cf: 0x3f, - 0x1d0: 0x9d, 0x1d1: 0x9d, 0x1d2: 0x9d, 0x1d3: 0x9d, 0x1d4: 0x9d, 0x1d5: 0x9d, 0x1d6: 0x9d, 0x1d7: 0x9d, - 0x1d8: 0x9d, 0x1d9: 0x9d, 0x1da: 0x9d, 0x1db: 0x9d, 0x1dc: 0x9d, 0x1dd: 0x9d, 0x1de: 0x9d, 0x1df: 0x9d, - 0x1e0: 0x9d, 0x1e1: 0x9d, 0x1e2: 0x9d, 0x1e3: 0x9d, 0x1e4: 0x9d, 0x1e5: 0x9d, 0x1e6: 0x9d, 0x1e7: 0x9d, - 0x1e8: 0x9d, 0x1e9: 0x9d, 0x1ea: 0x9d, 0x1eb: 0x9d, 0x1ec: 0x9d, 0x1ed: 0x9d, 0x1ee: 0x9d, 0x1ef: 0x9d, - 0x1f0: 0x9d, 0x1f1: 0x9d, 0x1f2: 0x9d, 0x1f3: 0x9d, 0x1f4: 0x9d, 0x1f5: 0x9d, 0x1f6: 0x9d, 0x1f7: 0x9d, - 0x1f8: 0x9d, 0x1f9: 0x9d, 0x1fa: 0x9d, 0x1fb: 0x9d, 0x1fc: 0x9d, 0x1fd: 0x9d, 0x1fe: 0x9d, 0x1ff: 0x9d, - // Block 0x8, offset 0x200 - 0x200: 0x9d, 0x201: 0x9d, 0x202: 0x9d, 0x203: 0x9d, 0x204: 0x9d, 0x205: 0x9d, 0x206: 0x9d, 0x207: 0x9d, - 0x208: 0x9d, 0x209: 0x9d, 0x20a: 0x9d, 0x20b: 0x9d, 0x20c: 0x9d, 0x20d: 0x9d, 0x20e: 0x9d, 0x20f: 0x9d, - 0x210: 0x9d, 0x211: 0x9d, 0x212: 0x9d, 0x213: 0x9d, 0x214: 0x9d, 0x215: 0x9d, 0x216: 0x9d, 0x217: 0x9d, - 0x218: 0x9d, 0x219: 0x9d, 0x21a: 0x9d, 0x21b: 0x9d, 0x21c: 0x9d, 0x21d: 0x9d, 0x21e: 0x9d, 0x21f: 0x9d, - 0x220: 0x9d, 0x221: 0x9d, 0x222: 0x9d, 0x223: 0x9d, 0x224: 0x9d, 0x225: 0x9d, 0x226: 0x9d, 0x227: 0x9d, - 0x228: 0x9d, 0x229: 0x9d, 0x22a: 0x9d, 0x22b: 0x9d, 0x22c: 0x9d, 0x22d: 0x9d, 0x22e: 0x9d, 0x22f: 0x9d, - 0x230: 0x9d, 0x231: 0x9d, 0x232: 0x9d, 0x233: 0x9d, 0x234: 0x9d, 0x235: 0x9d, 0x236: 0xb0, 0x237: 0x9b, - 0x238: 0x9d, 0x239: 0x9d, 0x23a: 0x9d, 0x23b: 0x9d, 0x23c: 0x9d, 0x23d: 0x9d, 0x23e: 0x9d, 0x23f: 0x9d, - // Block 0x9, offset 0x240 - 0x240: 0x9d, 0x241: 0x9d, 0x242: 0x9d, 0x243: 0x9d, 0x244: 0x9d, 0x245: 0x9d, 0x246: 0x9d, 0x247: 0x9d, - 0x248: 0x9d, 0x249: 0x9d, 0x24a: 0x9d, 0x24b: 0x9d, 0x24c: 0x9d, 0x24d: 0x9d, 0x24e: 0x9d, 0x24f: 0x9d, - 0x250: 0x9d, 0x251: 0x9d, 0x252: 0x9d, 0x253: 0x9d, 0x254: 0x9d, 0x255: 0x9d, 0x256: 0x9d, 0x257: 0x9d, - 0x258: 0x9d, 0x259: 0x9d, 0x25a: 0x9d, 0x25b: 0x9d, 0x25c: 0x9d, 0x25d: 0x9d, 0x25e: 0x9d, 0x25f: 0x9d, - 0x260: 0x9d, 0x261: 0x9d, 0x262: 0x9d, 0x263: 0x9d, 0x264: 0x9d, 0x265: 0x9d, 0x266: 0x9d, 0x267: 0x9d, - 0x268: 0x9d, 0x269: 0x9d, 0x26a: 0x9d, 0x26b: 0x9d, 0x26c: 0x9d, 0x26d: 0x9d, 0x26e: 0x9d, 0x26f: 0x9d, - 0x270: 0x9d, 0x271: 0x9d, 0x272: 0x9d, 0x273: 0x9d, 0x274: 0x9d, 0x275: 0x9d, 0x276: 0x9d, 0x277: 0x9d, - 0x278: 0x9d, 
0x279: 0x9d, 0x27a: 0x9d, 0x27b: 0x9d, 0x27c: 0x9d, 0x27d: 0x9d, 0x27e: 0x9d, 0x27f: 0x9d, - // Block 0xa, offset 0x280 - 0x280: 0x9d, 0x281: 0x9d, 0x282: 0x9d, 0x283: 0x9d, 0x284: 0x9d, 0x285: 0x9d, 0x286: 0x9d, 0x287: 0x9d, - 0x288: 0x9d, 0x289: 0x9d, 0x28a: 0x9d, 0x28b: 0x9d, 0x28c: 0x9d, 0x28d: 0x9d, 0x28e: 0x9d, 0x28f: 0x9d, - 0x290: 0x9d, 0x291: 0x9d, 0x292: 0x9d, 0x293: 0x9d, 0x294: 0x9d, 0x295: 0x9d, 0x296: 0x9d, 0x297: 0x9d, - 0x298: 0x9d, 0x299: 0x9d, 0x29a: 0x9d, 0x29b: 0x9d, 0x29c: 0x9d, 0x29d: 0x9d, 0x29e: 0x9d, 0x29f: 0x9d, - 0x2a0: 0x9d, 0x2a1: 0x9d, 0x2a2: 0x9d, 0x2a3: 0x9d, 0x2a4: 0x9d, 0x2a5: 0x9d, 0x2a6: 0x9d, 0x2a7: 0x9d, - 0x2a8: 0x9d, 0x2a9: 0x9d, 0x2aa: 0x9d, 0x2ab: 0x9d, 0x2ac: 0x9d, 0x2ad: 0x9d, 0x2ae: 0x9d, 0x2af: 0x9d, - 0x2b0: 0x9d, 0x2b1: 0x9d, 0x2b2: 0x9d, 0x2b3: 0x9d, 0x2b4: 0x9d, 0x2b5: 0x9d, 0x2b6: 0x9d, 0x2b7: 0x9d, - 0x2b8: 0x9d, 0x2b9: 0x9d, 0x2ba: 0x9d, 0x2bb: 0x9d, 0x2bc: 0x9d, 0x2bd: 0x9d, 0x2be: 0x9d, 0x2bf: 0xe2, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x9d, 0x2c1: 0x9d, 0x2c2: 0x9d, 0x2c3: 0x9d, 0x2c4: 0x9d, 0x2c5: 0x9d, 0x2c6: 0x9d, 0x2c7: 0x9d, - 0x2c8: 0x9d, 0x2c9: 0x9d, 0x2ca: 0x9d, 0x2cb: 0x9d, 0x2cc: 0x9d, 0x2cd: 0x9d, 0x2ce: 0x9d, 0x2cf: 0x9d, - 0x2d0: 0x9d, 0x2d1: 0x9d, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9d, 0x2d5: 0x9d, 0x2d6: 0x9d, 0x2d7: 0x9d, - 0x2d8: 0xe5, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe6, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xe7, - 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef, - 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, - 0x2f0: 0x9d, 0x2f1: 0x9d, 0x2f2: 0x9d, 0x2f3: 0x9d, 0x2f4: 0x9d, 0x2f5: 0x9d, 0x2f6: 0x9d, 0x2f7: 0x9d, - 0x2f8: 0x9d, 0x2f9: 0x9d, 0x2fa: 0x9d, 0x2fb: 0x9d, 0x2fc: 0x9d, 0x2fd: 0x9d, 0x2fe: 0x9d, 0x2ff: 0x9d, - // Block 0xc, offset 0x300 - 0x300: 0x9d, 0x301: 0x9d, 0x302: 0x9d, 0x303: 0x9d, 0x304: 0x9d, 0x305: 0x9d, 0x306: 0x9d, 0x307: 0x9d, - 0x308: 0x9d, 0x309: 0x9d, 0x30a: 0x9d, 0x30b: 0x9d, 0x30c: 0x9d, 0x30d: 0x9d, 0x30e: 0x9d, 0x30f: 0x9d, - 0x310: 0x9d, 0x311: 0x9d, 0x312: 0x9d, 0x313: 0x9d, 0x314: 0x9d, 0x315: 0x9d, 0x316: 0x9d, 0x317: 0x9d, - 0x318: 0x9d, 0x319: 0x9d, 0x31a: 0x9d, 0x31b: 0x9d, 0x31c: 0x9d, 0x31d: 0x9d, 0x31e: 0xf8, 0x31f: 0xf9, - // Block 0xd, offset 0x340 - 0x340: 0xb8, 0x341: 0xb8, 0x342: 0xb8, 0x343: 0xb8, 0x344: 0xb8, 0x345: 0xb8, 0x346: 0xb8, 0x347: 0xb8, - 0x348: 0xb8, 0x349: 0xb8, 0x34a: 0xb8, 0x34b: 0xb8, 0x34c: 0xb8, 0x34d: 0xb8, 0x34e: 0xb8, 0x34f: 0xb8, - 0x350: 0xb8, 0x351: 0xb8, 0x352: 0xb8, 0x353: 0xb8, 0x354: 0xb8, 0x355: 0xb8, 0x356: 0xb8, 0x357: 0xb8, - 0x358: 0xb8, 0x359: 0xb8, 0x35a: 0xb8, 0x35b: 0xb8, 0x35c: 0xb8, 0x35d: 0xb8, 0x35e: 0xb8, 0x35f: 0xb8, - 0x360: 0xb8, 0x361: 0xb8, 0x362: 0xb8, 0x363: 0xb8, 0x364: 0xb8, 0x365: 0xb8, 0x366: 0xb8, 0x367: 0xb8, - 0x368: 0xb8, 0x369: 0xb8, 0x36a: 0xb8, 0x36b: 0xb8, 0x36c: 0xb8, 0x36d: 0xb8, 0x36e: 0xb8, 0x36f: 0xb8, - 0x370: 0xb8, 0x371: 0xb8, 0x372: 0xb8, 0x373: 0xb8, 0x374: 0xb8, 0x375: 0xb8, 0x376: 0xb8, 0x377: 0xb8, - 0x378: 0xb8, 0x379: 0xb8, 0x37a: 0xb8, 0x37b: 0xb8, 0x37c: 0xb8, 0x37d: 0xb8, 0x37e: 0xb8, 0x37f: 0xb8, - // Block 0xe, offset 0x380 - 0x380: 0xb8, 0x381: 0xb8, 0x382: 0xb8, 0x383: 0xb8, 0x384: 0xb8, 0x385: 0xb8, 0x386: 0xb8, 0x387: 0xb8, - 0x388: 0xb8, 0x389: 0xb8, 0x38a: 0xb8, 0x38b: 0xb8, 0x38c: 0xb8, 0x38d: 0xb8, 0x38e: 0xb8, 0x38f: 0xb8, - 0x390: 0xb8, 0x391: 0xb8, 0x392: 0xb8, 0x393: 0xb8, 0x394: 0xb8, 0x395: 0xb8, 0x396: 0xb8, 0x397: 0xb8, - 0x398: 0xb8, 0x399: 0xb8, 
[Condensed for readability: this removed hunk is machine-generated IDNA lookup-table data from a vendored dependency, with the original diff line breaks lost in extraction. It covers the remaining index blocks (Block 0xf through Block 0x22), the idnaSparseOffset array (256 entries, 512 bytes), and a long run of idnaSparseValues blocks (the table holds 1876 valueRange entries, 7504 bytes); every entry is marked as a removal in this change. The raw hex entries contain no hand-written content and are omitted here.]
0xb8, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0xa0, offset 0x4e1 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xbf}, - // Block 0xa1, offset 0x4e5 - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x1308, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0008, lo: 0x95, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa2, offset 0x4f5 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbc}, - {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0xa3, offset 0x4fc - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa4, offset 0x500 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb8}, - {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa5, offset 0x504 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbf}, - // Block 0xa6, offset 0x50b - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa7, offset 0x50e - {value: 0x0000, lo: 0x02}, - {value: 0x03dd, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x511 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x515 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xaa, offset 0x519 - {value: 0x0000, lo: 0x05}, - {value: 0x1008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xab, offset 0x51f - {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x85}, - {value: 0x1b08, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xac, offset 0x528 - {value: 0x0000, lo: 0x0b}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb6}, - {value: 0x1008, lo: 0xb7, hi: 0xb8}, - {value: 0x1b08, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbc}, - {value: 0x0340, lo: 0xbd, hi: 0xbd}, - 
{value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xad, offset 0x534 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xae, offset 0x53b - {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xb2}, - {value: 0x1b08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xaf, offset 0x544 - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb0, offset 0x54c - {value: 0x0000, lo: 0x06}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xbe}, - {value: 0x1008, lo: 0xbf, hi: 0xbf}, - // Block 0xb1, offset 0x553 - {value: 0x0000, lo: 0x0d}, - {value: 0x1808, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x89}, - {value: 0x1308, lo: 0x8a, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb2, offset 0x561 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xae}, - {value: 0x1308, lo: 0xaf, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, - {value: 0x1808, lo: 0xb5, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb3, offset 0x56e - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0008, lo: 0x9f, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb4, offset 0x57b - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x1308, lo: 0x9f, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa9}, - {value: 0x1b08, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb5, offset 0x584 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xb6, offset 0x588 - {value: 0x0000, lo: 0x0d}, - 
{value: 0x1008, lo: 0x80, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x84}, - {value: 0x1008, lo: 0x85, hi: 0x85}, - {value: 0x1308, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xb7, offset 0x596 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb8}, - {value: 0x1008, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, - // Block 0xb8, offset 0x59e - {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x85}, - {value: 0x0018, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xb9, offset 0x5a9 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x1008, lo: 0xb8, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xba, offset 0x5b2 - {value: 0x0000, lo: 0x05}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x1308, lo: 0x9c, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbb, offset 0x5b8 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbc, offset 0x5c0 - {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xbd, offset 0x5c9 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb5}, - {value: 0x1808, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xbe, offset 0x5d3 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xbf, offset 0x5d6 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xaa}, - {value: 0x1b08, lo: 0xab, hi: 0xab}, - {value: 0x0040, lo: 
0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc0, offset 0x5e2 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc1, offset 0x5e5 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc2, offset 0x5ea - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc3, offset 0x5ed - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc4, offset 0x5f7 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xc5, offset 0x600 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xa9}, - {value: 0x1308, lo: 0xaa, hi: 0xb0}, - {value: 0x1008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xc6, offset 0x60c - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xc7, offset 0x60f - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xc8, offset 0x614 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xc9, offset 0x617 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xbf}, - // Block 0xca, offset 0x61a - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xcb, offset 0x61d - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xcc, offset 0x624 - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xcd, offset 0x62b - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xce, offset 0x62f - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, 
- {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x0008, lo: 0xa3, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xcf, offset 0x63a - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xd0, offset 0x63d - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x1008, lo: 0x91, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd1, offset 0x643 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8e}, - {value: 0x1308, lo: 0x8f, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xd2, offset 0x648 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0xd3, offset 0x64c - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xd4, offset 0x64f - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xd5, offset 0x652 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0xbf}, - // Block 0xd6, offset 0x655 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xd7, offset 0x65a - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x03c0, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xd8, offset 0x664 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xd9, offset 0x667 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xda, offset 0x66b - {value: 0x0000, lo: 0x0e}, - {value: 0x0018, lo: 0x80, hi: 0x9d}, - {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, - {value: 0xb601, lo: 0x9f, hi: 0x9f}, - {value: 0xb649, lo: 0xa0, hi: 0xa0}, - {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, - {value: 0xb719, lo: 0xa2, hi: 0xa2}, - {value: 0xb781, lo: 0xa3, hi: 0xa3}, - {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x1018, lo: 0xa5, hi: 0xa6}, - {value: 0x1318, lo: 0xa7, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x1018, lo: 0xad, hi: 0xb2}, - {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x1318, lo: 0xbb, hi: 0xbf}, - // Block 0xdb, offset 0x67a - {value: 0x0000, lo: 0x0b}, - {value: 0x1318, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x1318, lo: 0x85, hi: 0x8b}, - {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x1318, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xba}, - {value: 0xb851, lo: 0xbb, hi: 0xbb}, - {value: 0xb899, lo: 0xbc, hi: 0xbc}, - {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, - {value: 0xb949, lo: 0xbe, hi: 0xbe}, - {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xdc, offset 0x686 - {value: 0x0000, lo: 
0x03}, - {value: 0xba19, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xdd, offset 0x68a - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x1318, lo: 0x82, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xde, offset 0x68f - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xdf, offset 0x694 - {value: 0x0000, lo: 0x03}, - {value: 0x1308, lo: 0x80, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x1308, lo: 0xbb, hi: 0xbf}, - // Block 0xe0, offset 0x698 - {value: 0x0000, lo: 0x04}, - {value: 0x1308, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xe1, offset 0x69d - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x1308, lo: 0xa1, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xe2, offset 0x6a6 - {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x1308, lo: 0x88, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa4}, - {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xe3, offset 0x6b1 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8f}, - {value: 0x1308, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xe4, offset 0x6b7 - {value: 0x0000, lo: 0x07}, - {value: 0x0208, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe5, offset 0x6bf - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe6, offset 0x6c3 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0xe7, offset 0x6c7 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0xe8, offset 0x6cd - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe9, offset 0x6d3 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0xc1c1, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xea, offset 0x6d8 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, 
hi: 0xbf}, - // Block 0xeb, offset 0x6db - {value: 0x0000, lo: 0x0d}, - {value: 0xc7e9, lo: 0x80, hi: 0x80}, - {value: 0xc839, lo: 0x81, hi: 0x81}, - {value: 0xc889, lo: 0x82, hi: 0x82}, - {value: 0xc8d9, lo: 0x83, hi: 0x83}, - {value: 0xc929, lo: 0x84, hi: 0x84}, - {value: 0xc979, lo: 0x85, hi: 0x85}, - {value: 0xc9c9, lo: 0x86, hi: 0x86}, - {value: 0xca19, lo: 0x87, hi: 0x87}, - {value: 0xca69, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0xcab9, lo: 0x90, hi: 0x90}, - {value: 0xcad9, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xec, offset 0x6e9 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x92}, - {value: 0x0040, lo: 0x93, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xed, offset 0x6f0 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xee, offset 0x6f3 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0xbf}, - // Block 0xef, offset 0x6f6 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0xf0, offset 0x6fa - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0xf1, offset 0x700 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0xf2, offset 0x705 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb2}, - {value: 0x0018, lo: 0xb3, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xf3, offset 0x70f - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xf4, offset 0x714 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xf5, offset 0x717 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0xbf}, - // Block 0xf6, offset 0x71a - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xf7, offset 0x71d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xf8, offset 0x720 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xf9, offset 0x724 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xfa, offset 0x727 - {value: 0x0020, lo: 0x0f}, - {value: 0xdeb9, lo: 0x80, hi: 0x89}, - {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, - {value: 0xdff9, lo: 0x8b, hi: 0x9c}, - {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, - {value: 0xe239, lo: 0x9e, hi: 0xa2}, - {value: 0x8e3d, lo: 
0xa3, hi: 0xa3}, - {value: 0xe2d9, lo: 0xa4, hi: 0xab}, - {value: 0x7ed5, lo: 0xac, hi: 0xac}, - {value: 0xe3d9, lo: 0xad, hi: 0xaf}, - {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, - {value: 0xe439, lo: 0xb1, hi: 0xb6}, - {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, - {value: 0xe4f9, lo: 0xba, hi: 0xba}, - {value: 0x8edd, lo: 0xbb, hi: 0xbb}, - {value: 0xe519, lo: 0xbc, hi: 0xbf}, - // Block 0xfb, offset 0x737 - {value: 0x0020, lo: 0x10}, - {value: 0x937d, lo: 0x80, hi: 0x80}, - {value: 0xf099, lo: 0x81, hi: 0x86}, - {value: 0x939d, lo: 0x87, hi: 0x8a}, - {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, - {value: 0xf159, lo: 0x8c, hi: 0x96}, - {value: 0x941d, lo: 0x97, hi: 0x97}, - {value: 0xf2b9, lo: 0x98, hi: 0xa3}, - {value: 0x943d, lo: 0xa4, hi: 0xa6}, - {value: 0xf439, lo: 0xa7, hi: 0xaa}, - {value: 0x949d, lo: 0xab, hi: 0xab}, - {value: 0xf4b9, lo: 0xac, hi: 0xac}, - {value: 0x94bd, lo: 0xad, hi: 0xad}, - {value: 0xf4d9, lo: 0xae, hi: 0xaf}, - {value: 0x94dd, lo: 0xb0, hi: 0xb1}, - {value: 0xf519, lo: 0xb2, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xfc, offset 0x748 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0340, lo: 0x81, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x9f}, - {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0xfd, offset 0x74d - {value: 0x0000, lo: 0x01}, - {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0xfe, offset 0x74f - {value: 0x0000, lo: 0x01}, - {value: 0x13c0, lo: 0x80, hi: 0xbf}, - // Block 0xff, offset 0x751 - {value: 0x0000, lo: 0x02}, - {value: 0x13c0, lo: 0x80, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, -} - -// Total table size 41559 bytes (40KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go deleted file mode 100644 index c4ef847e..00000000 --- a/vendor/golang.org/x/net/idna/trie.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// appendMapping appends the mapping for the respective rune. isMapped must be -// true. A mapping is a categorization of a rune as defined in UTS #46. -func (c info) appendMapping(b []byte, s string) []byte { - index := int(c >> indexShift) - if c&xorBit == 0 { - s := mappings[index:] - return append(b, s[1:s[0]+1]...) - } - b = append(b, s...) - if c&inlineXOR == inlineXOR { - // TODO: support and handle two-byte inline masks - b[len(b)-1] ^= byte(index) - } else { - for p := len(b) - int(xorData[index]); p < len(b); p++ { - index++ - b[p] ^= xorData[index] - } - } - return b -} - -// Sparse block handling code. - -type valueRange struct { - value uint16 // header: value:stride - lo, hi byte // header: lo:n -} - -type sparseBlocks struct { - values []valueRange - offset []uint16 -} - -var idnaSparse = sparseBlocks{ - values: idnaSparseValues[:], - offset: idnaSparseOffset[:], -} - -// Don't use newIdnaTrie to avoid unconditional linking in of the table. -var trie = &idnaTrie{} - -// lookup determines the type of block n and looks up the value for b. -// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block -// is a list of ranges with an accompanying value. Given a matching range r, -// the value for b is by r.value + (b - r.lo) * stride. 
-func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { - offset := t.offset[n] - header := t.values[offset] - lo := offset + 1 - hi := lo + uint16(header.lo) - for lo < hi { - m := lo + (hi-lo)/2 - r := t.values[m] - if r.lo <= b && b <= r.hi { - return r.value + uint16(b-r.lo)*header.value - } - if b < r.lo { - hi = m - } else { - lo = m + 1 - } - } - return 0 -} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go deleted file mode 100644 index 63cb03b5..00000000 --- a/vendor/golang.org/x/net/idna/trieval.go +++ /dev/null @@ -1,114 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package idna - -// This file contains definitions for interpreting the trie value of the idna -// trie generated by "go run gen*.go". It is shared by both the generator -// program and the resultant package. Sharing is achieved by the generator -// copying gen_trieval.go to trieval.go and changing what's above this comment. - -// info holds information from the IDNA mapping table for a single rune. It is -// the value returned by a trie lookup. In most cases, all information fits in -// a 16-bit value. For mappings, this value may contain an index into a slice -// with the mapped string. Such mappings can consist of the actual mapped value -// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the -// input rune. This technique is used by the cases packages and reduces the -// table size significantly. -// -// The per-rune values have the following format: -// -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..13 unused -// 12 modifier (including virama) -// 11 virama modifier -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category -// -// See the definitions below for a more detailed description of the various -// bits. -type info uint16 - -const ( - catSmallMask = 0x3 - catBigMask = 0xF8 - indexShift = 3 - xorBit = 0x4 // interpret the index as an xor pattern - inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. - - joinShift = 8 - joinMask = 0x07 - - viramaModifier = 0x0800 - modifier = 0x1000 -) - -// A category corresponds to a category defined in the IDNA mapping table. -type category uint16 - -const ( - unknown category = 0 // not defined currently in unicode. 
- mapped category = 1 - disallowedSTD3Mapped category = 2 - deviation category = 3 -) - -const ( - valid category = 0x08 - validNV8 category = 0x18 - validXV8 category = 0x28 - disallowed category = 0x40 - disallowedSTD3Valid category = 0x80 - ignored category = 0xC0 -) - -// join types and additional rune information -const ( - joiningL = (iota + 1) - joiningD - joiningT - joiningR - - //the following types are derived during processing - joinZWJ - joinZWNJ - joinVirama - numJoinTypes -) - -func (c info) isMapped() bool { - return c&0x3 != 0 -} - -func (c info) category() category { - small := c & catSmallMask - if small != 0 { - return category(small) - } - return category(c & catBigMask) -} - -func (c info) joinType() info { - if c.isMapped() { - return 0 - } - return (c >> joinShift) & joinMask -} - -func (c info) isModifier() bool { - return c&(modifier|catSmallMask) == modifier -} - -func (c info) isViramaModifier() bool { - return c&(viramaModifier|catSmallMask) == viramaModifier -} diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go index c9df24d9..cea712fa 100644 --- a/vendor/golang.org/x/net/internal/iana/const.go +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -1,44 +1,40 @@ // go generate gen.go -// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). package iana // import "golang.org/x/net/internal/iana" -// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 +// Differentiated Services Field Codepoints (DSCP), Updated: 2018-05-04 const ( - DiffServCS0 = 0x0 // CS0 - DiffServCS1 = 0x20 // CS1 - DiffServCS2 = 0x40 // CS2 - DiffServCS3 = 0x60 // CS3 - DiffServCS4 = 0x80 // CS4 - DiffServCS5 = 0xa0 // CS5 - DiffServCS6 = 0xc0 // CS6 - DiffServCS7 = 0xe0 // CS7 - DiffServAF11 = 0x28 // AF11 - DiffServAF12 = 0x30 // AF12 - DiffServAF13 = 0x38 // AF13 - DiffServAF21 = 0x48 // AF21 - DiffServAF22 = 0x50 // AF22 - DiffServAF23 = 0x58 // AF23 - DiffServAF31 = 0x68 // AF31 - DiffServAF32 = 0x70 // AF32 - DiffServAF33 = 0x78 // AF33 - DiffServAF41 = 0x88 // AF41 - DiffServAF42 = 0x90 // AF42 - DiffServAF43 = 0x98 // AF43 - DiffServEF = 0xb8 // EF - DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT + DiffServCS0 = 0x00 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT + NotECNTransport = 0x00 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x01 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x02 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x03 // CE (Congestion Experienced) ) -// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 -const ( - NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) - ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) - ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) - CongestionExperienced = 0x3 
// CE (Congestion Experienced) -) - -// Protocol Numbers, Updated: 2016-06-22 +// Protocol Numbers, Updated: 2017-10-13 const ( ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option @@ -178,3 +174,50 @@ const ( ProtocolROHC = 142 // Robust Header Compression ProtocolReserved = 255 // Reserved ) + +// Address Family Numbers, Updated: 2018-04-02 +const ( + AddrFamilyIPv4 = 1 // IP (IP version 4) + AddrFamilyIPv6 = 2 // IP6 (IP version 6) + AddrFamilyNSAP = 3 // NSAP + AddrFamilyHDLC = 4 // HDLC (8-bit multidrop) + AddrFamilyBBN1822 = 5 // BBN 1822 + AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format") + AddrFamilyE163 = 7 // E.163 + AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM) + AddrFamilyF69 = 9 // F.69 (Telex) + AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay) + AddrFamilyIPX = 11 // IPX + AddrFamilyAppletalk = 12 // Appletalk + AddrFamilyDecnetIV = 13 // Decnet IV + AddrFamilyBanyanVines = 14 // Banyan Vines + AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress + AddrFamilyDNS = 16 // DNS (Domain Name System) + AddrFamilyDistinguishedName = 17 // Distinguished Name + AddrFamilyASNumber = 18 // AS Number + AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4 + AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6 + AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP + AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name + AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name + AddrFamilyGWID = 24 // GWID + AddrFamilyL2VPN = 25 // AFI for L2VPN information + AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier + AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP Endpoint Identifier + AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier + AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4 + AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6 + AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family + AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family + AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family + AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF) + AddrFamilyBGPLS = 16388 // BGP-LS + AddrFamily48bitMAC = 16389 // 48-bit MAC + AddrFamily64bitMAC = 16390 // 64-bit MAC + AddrFamilyOUI = 16391 // OUI + AddrFamilyMACFinal24bits = 16392 // MAC/24 + AddrFamilyMACFinal40bits = 16393 // MAC/40 + AddrFamilyIPv6Initial64bits = 16394 // IPv6/64 + AddrFamilyRBridgePortID = 16395 // RBridge Port ID + AddrFamilyTRILLNickname = 16396 // TRILL Nickname +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go deleted file mode 100644 index 86c78b3b..00000000 --- a/vendor/golang.org/x/net/internal/iana/gen.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates internet protocol constants and tables by -// reading IANA protocol registries. 
-package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "strconv" - "strings" -) - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "http://www.iana.org/assignments/dscp-registry/dscp-registry.xml", - parseDSCPRegistry, - }, - { - "http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", - parseTOSTCByte, - }, - { - "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", - parseProtocolNumbers, - }, -} - -func main() { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") - fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") - fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) - os.Exit(1) - } - if err := r.parse(&bb, resp.Body); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile("const.go", b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func parseDSCPRegistry(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var dr dscpRegistry - if err := dec.Decode(&dr); err != nil { - return err - } - drs := dr.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) - fmt.Fprintf(w, "const (\n") - for _, dr := range drs { - fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) - fmt.Fprintf(w, "// %s\n", dr.OrigName) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type dscpRegistry struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Note string `xml:"note"` - RegTitle string `xml:"registry>title"` - PoolRecords []struct { - Name string `xml:"name"` - Space string `xml:"space"` - } `xml:"registry>record"` - Records []struct { - Name string `xml:"name"` - Space string `xml:"space"` - } `xml:"registry>registry>record"` -} - -type canonDSCPRecord struct { - OrigName string - Name string - Value int -} - -func (drr *dscpRegistry) escape() []canonDSCPRecord { - drs := make([]canonDSCPRecord, len(drr.Records)) - sr := strings.NewReplacer( - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, dr := range drr.Records { - s := strings.TrimSpace(dr.Name) - drs[i].OrigName = s - drs[i].Name = sr.Replace(s) - n, err := strconv.ParseUint(dr.Space, 2, 8) - if err != nil { - continue - } - drs[i].Value = int(n) << 2 - } - return drs -} - -func parseTOSTCByte(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var ttb tosTCByte - if err := dec.Decode(&ttb); err != nil { - return err - } - trs := ttb.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) - fmt.Fprintf(w, "const (\n") - for _, tr := range trs { - fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) - fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type tosTCByte struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Note string `xml:"note"` 
- RegTitle string `xml:"registry>title"` - Records []struct { - Binary string `xml:"binary"` - Keyword string `xml:"keyword"` - } `xml:"registry>record"` -} - -type canonTOSTCByteRecord struct { - OrigKeyword string - Keyword string - Value int -} - -func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { - trs := make([]canonTOSTCByteRecord, len(ttb.Records)) - sr := strings.NewReplacer( - "Capable", "", - "(", "", - ")", "", - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, tr := range ttb.Records { - s := strings.TrimSpace(tr.Keyword) - trs[i].OrigKeyword = s - ss := strings.Split(s, " ") - if len(ss) > 1 { - trs[i].Keyword = strings.Join(ss[1:], " ") - } else { - trs[i].Keyword = ss[0] - } - trs[i].Keyword = sr.Replace(trs[i].Keyword) - n, err := strconv.ParseUint(tr.Binary, 2, 8) - if err != nil { - continue - } - trs[i].Value = int(n) - } - return trs -} - -func parseProtocolNumbers(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var pn protocolNumbers - if err := dec.Decode(&pn); err != nil { - return err - } - prs := pn.escape() - prs = append([]canonProtocolRecord{{ - Name: "IP", - Descr: "IPv4 encapsulation, pseudo protocol number", - Value: 0, - }}, prs...) - fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) - s := pr.Descr - if s == "" { - s = pr.OrigName - } - fmt.Fprintf(w, "// %s\n", s) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type protocolNumbers struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - RegTitle string `xml:"registry>title"` - Note string `xml:"registry>note"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - Descr string `xml:"description"` - } `xml:"registry>record"` -} - -type canonProtocolRecord struct { - OrigName string - Name string - Descr string - Value int -} - -func (pn *protocolNumbers) escape() []canonProtocolRecord { - prs := make([]canonProtocolRecord, len(pn.Records)) - sr := strings.NewReplacer( - "-in-", "in", - "-within-", "within", - "-over-", "over", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range pn.Records { - if strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "deprecated") { - continue - } - prs[i].OrigName = pr.Name - s := strings.TrimSpace(pr.Name) - switch pr.Name { - case "ISIS over IPv4": - prs[i].Name = "ISIS" - case "manet": - prs[i].Name = "MANET" - default: - prs[i].Name = sr.Replace(s) - } - ss := strings.Split(pr.Descr, "\n") - for i := range ss { - ss[i] = strings.TrimSpace(ss[i]) - } - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_bsd.go b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go deleted file mode 100644 index a6e433b5..00000000 --- a/vendor/golang.org/x/net/internal/nettest/helper_bsd.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd netbsd openbsd - -package nettest - -import ( - "runtime" - "strconv" - "strings" - "syscall" -) - -var darwinVersion int - -func init() { - if runtime.GOOS == "darwin" { - // See http://support.apple.com/kb/HT1633. - s, err := syscall.Sysctl("kern.osrelease") - if err != nil { - return - } - ss := strings.Split(s, ".") - if len(ss) == 0 { - return - } - darwinVersion, _ = strconv.Atoi(ss[0]) - } -} - -func supportsIPv6MulticastDeliveryOnLoopback() bool { - switch runtime.GOOS { - case "freebsd": - // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. - // Even after the fix, it looks like the latest - // kernels don't deliver link-local scoped multicast - // packets correctly. - return false - case "darwin": - return !causesIPv6Crash() - default: - return true - } -} - -func causesIPv6Crash() bool { - // We see some kernel crash when running IPv6 with IP-level - // options on Darwin kernel version 12 or below. - // See golang.org/issues/17015. - return darwinVersion < 13 -} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go deleted file mode 100644 index bc7da5e0..00000000 --- a/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux solaris - -package nettest - -func supportsIPv6MulticastDeliveryOnLoopback() bool { - return true -} - -func causesIPv6Crash() bool { - return false -} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_posix.go b/vendor/golang.org/x/net/internal/nettest/helper_posix.go deleted file mode 100644 index 963ed996..00000000 --- a/vendor/golang.org/x/net/internal/nettest/helper_posix.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows - -package nettest - -import ( - "os" - "syscall" -) - -func protocolNotSupported(err error) bool { - switch err := err.(type) { - case syscall.Errno: - switch err { - case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: - return true - } - case *os.SyscallError: - switch err := err.Err.(type) { - case syscall.Errno: - switch err { - case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: - return true - } - } - } - return false -} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_stub.go b/vendor/golang.org/x/net/internal/nettest/helper_stub.go deleted file mode 100644 index ea61b6f3..00000000 --- a/vendor/golang.org/x/net/internal/nettest/helper_stub.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build nacl plan9 - -package nettest - -import ( - "fmt" - "runtime" -) - -func maxOpenFiles() int { - return defaultMaxOpenFiles -} - -func supportsRawIPSocket() (string, bool) { - return fmt.Sprintf("not supported on %s", runtime.GOOS), false -} - -func supportsIPv6MulticastDeliveryOnLoopback() bool { - return false -} - -func causesIPv6Crash() bool { - return false -} - -func protocolNotSupported(err error) bool { - return false -} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_unix.go b/vendor/golang.org/x/net/internal/nettest/helper_unix.go deleted file mode 100644 index ed13e448..00000000 --- a/vendor/golang.org/x/net/internal/nettest/helper_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package nettest - -import ( - "fmt" - "os" - "runtime" - "syscall" -) - -func maxOpenFiles() int { - var rlim syscall.Rlimit - if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { - return defaultMaxOpenFiles - } - return int(rlim.Cur) -} - -func supportsRawIPSocket() (string, bool) { - if os.Getuid() != 0 { - return fmt.Sprintf("must be root on %s", runtime.GOOS), false - } - return "", true -} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_windows.go b/vendor/golang.org/x/net/internal/nettest/helper_windows.go deleted file mode 100644 index 3dcb727c..00000000 --- a/vendor/golang.org/x/net/internal/nettest/helper_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package nettest - -import ( - "fmt" - "runtime" - "syscall" -) - -func maxOpenFiles() int { - return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ -} - -func supportsRawIPSocket() (string, bool) { - // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: - // Note: To use a socket of type SOCK_RAW requires administrative privileges. - // Users running Winsock applications that use raw sockets must be a member of - // the Administrators group on the local computer, otherwise raw socket calls - // will fail with an error code of WSAEACCES. On Windows Vista and later, access - // for raw sockets is enforced at socket creation. In earlier versions of Windows, - // access for raw sockets is enforced during other socket operations. - s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0) - if err == syscall.WSAEACCES { - return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false - } - if err != nil { - return err.Error(), false - } - syscall.Closesocket(s) - return "", true -} - -func supportsIPv6MulticastDeliveryOnLoopback() bool { - return true -} - -func causesIPv6Crash() bool { - return false -} diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go deleted file mode 100644 index 8e6333af..00000000 --- a/vendor/golang.org/x/net/internal/nettest/interface.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package nettest - -import "net" - -// IsMulticastCapable reports whether ifi is an IP multicast-capable -// network interface. 
Network must be "ip", "ip4" or "ip6". -func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) { - switch network { - case "ip", "ip4", "ip6": - default: - return nil, false - } - if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { - return nil, false - } - return hasRoutableIP(network, ifi) -} - -// RoutedInterface returns a network interface that can route IP -// traffic and satisfies flags. It returns nil when an appropriate -// network interface is not found. Network must be "ip", "ip4" or -// "ip6". -func RoutedInterface(network string, flags net.Flags) *net.Interface { - switch network { - case "ip", "ip4", "ip6": - default: - return nil - } - ift, err := net.Interfaces() - if err != nil { - return nil - } - for _, ifi := range ift { - if ifi.Flags&flags != flags { - continue - } - if _, ok := hasRoutableIP(network, &ifi); !ok { - continue - } - return &ifi - } - return nil -} - -func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { - ifat, err := ifi.Addrs() - if err != nil { - return nil, false - } - for _, ifa := range ifat { - switch ifa := ifa.(type) { - case *net.IPAddr: - if ip := routableIP(network, ifa.IP); ip != nil { - return ip, true - } - case *net.IPNet: - if ip := routableIP(network, ifa.IP); ip != nil { - return ip, true - } - } - } - return nil, false -} - -func routableIP(network string, ip net.IP) net.IP { - if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { - return nil - } - switch network { - case "ip4": - if ip := ip.To4(); ip != nil { - return ip - } - case "ip6": - if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation - return nil - } - if ip := ip.To16(); ip != nil && ip.To4() == nil { - return ip - } - default: - if ip := ip.To4(); ip != nil { - return ip - } - if ip := ip.To16(); ip != nil { - return ip - } - } - return nil -} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go b/vendor/golang.org/x/net/internal/nettest/rlimit.go deleted file mode 100644 index bb34aec0..00000000 --- a/vendor/golang.org/x/net/internal/nettest/rlimit.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package nettest - -const defaultMaxOpenFiles = 256 - -// MaxOpenFiles returns the maximum number of open files for the -// caller's process. -func MaxOpenFiles() int { return maxOpenFiles() } diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go deleted file mode 100644 index cc92c035..00000000 --- a/vendor/golang.org/x/net/internal/nettest/stack.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package nettest provides utilities for network testing. -package nettest // import "golang.org/x/net/internal/nettest" - -import ( - "fmt" - "io/ioutil" - "net" - "os" - "runtime" -) - -var ( - supportsIPv4 bool - supportsIPv6 bool -) - -func init() { - if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { - ln.Close() - supportsIPv4 = true - } - if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil { - ln.Close() - supportsIPv6 = true - } -} - -// SupportsIPv4 reports whether the platform supports IPv4 networking -// functionality. 
-func SupportsIPv4() bool { return supportsIPv4 } - -// SupportsIPv6 reports whether the platform supports IPv6 networking -// functionality. -func SupportsIPv6() bool { return supportsIPv6 } - -// SupportsRawIPSocket reports whether the platform supports raw IP -// sockets. -func SupportsRawIPSocket() (string, bool) { - return supportsRawIPSocket() -} - -// SupportsIPv6MulticastDeliveryOnLoopback reports whether the -// platform supports IPv6 multicast packet delivery on software -// loopback interface. -func SupportsIPv6MulticastDeliveryOnLoopback() bool { - return supportsIPv6MulticastDeliveryOnLoopback() -} - -// ProtocolNotSupported reports whether err is a protocol not -// supported error. -func ProtocolNotSupported(err error) bool { - return protocolNotSupported(err) -} - -// TestableNetwork reports whether network is testable on the current -// platform configuration. -func TestableNetwork(network string) bool { - // This is based on logic from standard library's - // net/platform_test.go. - switch network { - case "unix", "unixgram": - switch runtime.GOOS { - case "android", "nacl", "plan9", "windows": - return false - } - if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { - return false - } - case "unixpacket": - switch runtime.GOOS { - case "android", "darwin", "freebsd", "nacl", "plan9", "windows": - return false - } - } - return true -} - -// NewLocalListener returns a listener which listens to a loopback IP -// address or local file system path. -// Network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket". -func NewLocalListener(network string) (net.Listener, error) { - switch network { - case "tcp": - if supportsIPv4 { - if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { - return ln, nil - } - } - if supportsIPv6 { - return net.Listen("tcp6", "[::1]:0") - } - case "tcp4": - if supportsIPv4 { - return net.Listen("tcp4", "127.0.0.1:0") - } - case "tcp6": - if supportsIPv6 { - return net.Listen("tcp6", "[::1]:0") - } - case "unix", "unixpacket": - return net.Listen(network, localPath()) - } - return nil, fmt.Errorf("%s is not supported", network) -} - -// NewLocalPacketListener returns a packet listener which listens to a -// loopback IP address or local file system path. -// Network must be "udp", "udp4", "udp6" or "unixgram". -func NewLocalPacketListener(network string) (net.PacketConn, error) { - switch network { - case "udp": - if supportsIPv4 { - if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil { - return c, nil - } - } - if supportsIPv6 { - return net.ListenPacket("udp6", "[::1]:0") - } - case "udp4": - if supportsIPv4 { - return net.ListenPacket("udp4", "127.0.0.1:0") - } - case "udp6": - if supportsIPv6 { - return net.ListenPacket("udp6", "[::1]:0") - } - case "unixgram": - return net.ListenPacket(network, localPath()) - } - return nil, fmt.Errorf("%s is not supported", network) -} - -func localPath() string { - f, err := ioutil.TempFile("", "nettest") - if err != nil { - panic(err) - } - path := f.Name() - f.Close() - os.Remove(path) - return path -} diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go deleted file mode 100644 index 14e28c0b..00000000 --- a/vendor/golang.org/x/net/internal/socket/defs_darwin.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go deleted file mode 100644 index 14e28c0b..00000000 --- a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go deleted file mode 100644 index 14e28c0b..00000000 --- a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go deleted file mode 100644 index ce9ec2f6..00000000 --- a/vendor/golang.org/x/net/internal/socket/defs_linux.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include -#include - -#define _GNU_SOURCE -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofMmsghdr = C.sizeof_struct_mmsghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go deleted file mode 100644 index 3f843356..00000000 --- a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofMmsghdr = C.sizeof_struct_mmsghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go deleted file mode 100644 index 14e28c0b..00000000 --- a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go deleted file mode 100644 index 14e28c0b..00000000 --- a/vendor/golang.org/x/net/internal/socket/defs_solaris.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW -) - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go index d6a570c9..05d6082d 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -10,6 +10,10 @@ package socket import "unsafe" func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } v.Base = (*byte)(unsafe.Pointer(&b[0])) - v.Len = uint32(len(b)) + v.Len = uint32(l) } diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go index 2ae435e6..afb34ad5 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -10,6 +10,10 @@ package socket import "unsafe" func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } v.Base = (*byte)(unsafe.Pointer(&b[0])) - v.Len = uint64(len(b)) + v.Len = uint64(l) } diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go index 100a6282..8d17a40c 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -10,6 +10,10 @@ package socket import "unsafe" func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } v.Base = (*int8)(unsafe.Pointer(&b[0])) - v.Len = uint64(len(b)) + v.Len = uint64(l) } diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go index 3fcb0428..b8c87b72 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -7,6 +7,10 @@ package socket func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } h.Iov = &vs[0] - h.Iovlen = int32(len(vs)) + h.Iovlen = int32(l) } diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go index 9f671aec..a7a5987c 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -10,8 +10,12 @@ package socket import "unsafe" func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } h.Iov = &vs[0] - h.Iovlen = uint32(len(vs)) + h.Iovlen = uint32(l) } func (h *msghdr) setControl(b []byte) { diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go index 9f787062..610fc4f3 100644 --- 
a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -10,8 +10,12 @@ package socket import "unsafe" func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } h.Iov = &vs[0] - h.Iovlen = uint64(len(vs)) + h.Iovlen = uint64(l) } func (h *msghdr) setControl(b []byte) { diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go index be354ff8..71a69e25 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -5,6 +5,10 @@ package socket func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } h.Iov = &vs[0] - h.Iovlen = uint32(len(vs)) + h.Iovlen = uint32(l) } diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go index d1b05939..6465b207 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -13,8 +13,10 @@ func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { for i := range vs { vs[i].set(bs[i]) } - h.Iov = &vs[0] - h.Iovlen = int32(len(vs)) + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } if len(oob) > 0 { h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) h.Accrightslen = int32(len(oob)) diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go index 729dea14..5f9730e6 100644 --- a/vendor/golang.org/x/net/internal/socket/socket.go +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -110,7 +110,7 @@ func ControlMessageSpace(dataLen int) int { type ControlMessage []byte // Data returns the data field of the control message at the head on -// w. +// m. func (m ControlMessage) Data(dataLen int) []byte { l := controlHeaderLen() if len(m) < l || len(m) < l+dataLen { @@ -119,7 +119,7 @@ func (m ControlMessage) Data(dataLen int) []byte { return m[l : l+dataLen] } -// Next returns the control message at the next on w. +// Next returns the control message at the next on m. // // Next works only for standard control messages. func (m ControlMessage) Next(dataLen int) ControlMessage { @@ -131,7 +131,7 @@ func (m ControlMessage) Next(dataLen int) ControlMessage { } // MarshalHeader marshals the header fields of the control message at -// the head on w. +// the head on m. func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { if len(m) < controlHeaderLen() { return errors.New("short message") @@ -142,7 +142,7 @@ func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { } // ParseHeader parses and returns the header fields of the control -// message at the head on w. +// message at the head on m. func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { l := controlHeaderLen() if len(m) < l { @@ -152,7 +152,7 @@ func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil } -// Marshal marshals the control message at the head on w, and returns +// Marshal marshals the control message at the head on m, and returns // the next control message. 
func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { l := len(data) @@ -167,7 +167,7 @@ func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, erro return m.Next(l), nil } -// Parse parses w as a single or multiple control messages. +// Parse parses m as a single or multiple control messages. // // Parse works for both standard and compatible messages. func (m ControlMessage) Parse() ([]ControlMessage, error) { diff --git a/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go deleted file mode 100644 index 109fed76..00000000 --- a/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package socket_test - -import ( - "bytes" - "fmt" - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/internal/socket" -) - -type mockControl struct { - Level int - Type int - Data []byte -} - -func TestControlMessage(t *testing.T) { - for _, tt := range []struct { - cs []mockControl - }{ - { - []mockControl{ - {Level: 1, Type: 1}, - }, - }, - { - []mockControl{ - {Level: 2, Type: 2, Data: []byte{0xfe}}, - }, - }, - { - []mockControl{ - {Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}}, - }, - }, - { - []mockControl{ - {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, - }, - }, - { - []mockControl{ - {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, - {Level: 2, Type: 2, Data: []byte{0xfe}}, - }, - }, - } { - var w []byte - var tailPadLen int - mm := socket.NewControlMessage([]int{0}) - for i, c := range tt.cs { - m := socket.NewControlMessage([]int{len(c.Data)}) - l := len(m) - len(mm) - if i == len(tt.cs)-1 && l > len(c.Data) { - tailPadLen = l - len(c.Data) - } - w = append(w, m...) - } - - var err error - ww := make([]byte, len(w)) - copy(ww, w) - m := socket.ControlMessage(ww) - for _, c := range tt.cs { - if err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil { - t.Fatalf("(%v).MarshalHeader() = %v", tt.cs, err) - } - copy(m.Data(len(c.Data)), c.Data) - m = m.Next(len(c.Data)) - } - m = socket.ControlMessage(w) - for _, c := range tt.cs { - m, err = m.Marshal(c.Level, c.Type, c.Data) - if err != nil { - t.Fatalf("(%v).Marshal() = %v", tt.cs, err) - } - } - if !bytes.Equal(ww, w) { - t.Fatalf("got %#v; want %#v", ww, w) - } - - ws := [][]byte{w} - if tailPadLen > 0 { - // Test a message with no tail padding. - nopad := w[:len(w)-tailPadLen] - ws = append(ws, [][]byte{nopad}...) 
- } - for _, w := range ws { - ms, err := socket.ControlMessage(w).Parse() - if err != nil { - t.Fatalf("(%v).Parse() = %v", tt.cs, err) - } - for i, m := range ms { - lvl, typ, dataLen, err := m.ParseHeader() - if err != nil { - t.Fatalf("(%v).ParseHeader() = %v", tt.cs, err) - } - if lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) { - t.Fatalf("%v: got %d, %d, %d; want %d, %d, %d", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data)) - } - } - } - } -} - -func TestUDP(t *testing.T) { - c, err := nettest.NewLocalPacketListener("udp") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - - t.Run("Message", func(t *testing.T) { - testUDPMessage(t, c.(net.Conn)) - }) - switch runtime.GOOS { - case "linux": - t.Run("Messages", func(t *testing.T) { - testUDPMessages(t, c.(net.Conn)) - }) - } -} - -func testUDPMessage(t *testing.T, c net.Conn) { - cc, err := socket.NewConn(c) - if err != nil { - t.Fatal(err) - } - data := []byte("HELLO-R-U-THERE") - wm := socket.Message{ - Buffers: bytes.SplitAfter(data, []byte("-")), - Addr: c.LocalAddr(), - } - if err := cc.SendMsg(&wm, 0); err != nil { - t.Fatal(err) - } - b := make([]byte, 32) - rm := socket.Message{ - Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]}, - } - if err := cc.RecvMsg(&rm, 0); err != nil { - t.Fatal(err) - } - if !bytes.Equal(b[:rm.N], data) { - t.Fatalf("got %#v; want %#v", b[:rm.N], data) - } -} - -func testUDPMessages(t *testing.T, c net.Conn) { - cc, err := socket.NewConn(c) - if err != nil { - t.Fatal(err) - } - data := []byte("HELLO-R-U-THERE") - wmbs := bytes.SplitAfter(data, []byte("-")) - wms := []socket.Message{ - {Buffers: wmbs[:1], Addr: c.LocalAddr()}, - {Buffers: wmbs[1:], Addr: c.LocalAddr()}, - } - n, err := cc.SendMsgs(wms, 0) - if err != nil { - t.Fatal(err) - } - if n != len(wms) { - t.Fatalf("got %d; want %d", n, len(wms)) - } - b := make([]byte, 32) - rmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}} - rms := []socket.Message{ - {Buffers: rmbs[0]}, - {Buffers: rmbs[1]}, - } - n, err = cc.RecvMsgs(rms, 0) - if err != nil { - t.Fatal(err) - } - if n != len(rms) { - t.Fatalf("got %d; want %d", n, len(rms)) - } - nn := 0 - for i := 0; i < n; i++ { - nn += rms[i].N - } - if !bytes.Equal(b[:nn], data) { - t.Fatalf("got %#v; want %#v", b[:nn], data) - } -} - -func BenchmarkUDP(b *testing.B) { - c, err := nettest.NewLocalPacketListener("udp") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - cc, err := socket.NewConn(c.(net.Conn)) - if err != nil { - b.Fatal(err) - } - data := []byte("HELLO-R-U-THERE") - wm := socket.Message{ - Buffers: [][]byte{data}, - Addr: c.LocalAddr(), - } - rm := socket.Message{ - Buffers: [][]byte{make([]byte, 128)}, - OOB: make([]byte, 128), - } - - for M := 1; M <= 1<<9; M = M << 1 { - b.Run(fmt.Sprintf("Iter-%d", M), func(b *testing.B) { - for i := 0; i < b.N; i++ { - for j := 0; j < M; j++ { - if err := cc.SendMsg(&wm, 0); err != nil { - b.Fatal(err) - } - if err := cc.RecvMsg(&rm, 0); err != nil { - b.Fatal(err) - } - } - } - }) - switch runtime.GOOS { - case "linux": - wms := make([]socket.Message, M) - for i := range wms { - wms[i].Buffers = [][]byte{data} - wms[i].Addr = c.LocalAddr() - } - rms := make([]socket.Message, M) - for i := range rms { - rms[i].Buffers = [][]byte{make([]byte, 128)} - rms[i].OOB = make([]byte, 128) - } - b.Run(fmt.Sprintf("Batch-%d", M), func(b *testing.B) { 
- for i := 0; i < b.N; i++ { - if _, err := cc.SendMsgs(wms, 0); err != nil { - b.Fatal(err) - } - if _, err := cc.RecvMsgs(rms, 0); err != nil { - b.Fatal(err) - } - } - }) - } - } -} diff --git a/vendor/golang.org/x/net/internal/socket/socket_test.go b/vendor/golang.org/x/net/internal/socket/socket_test.go deleted file mode 100644 index bf3751b5..00000000 --- a/vendor/golang.org/x/net/internal/socket/socket_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows - -package socket_test - -import ( - "net" - "runtime" - "syscall" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/internal/socket" -) - -func TestSocket(t *testing.T) { - t.Run("Option", func(t *testing.T) { - testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}) - }) -} - -func testSocketOption(t *testing.T, so *socket.Option) { - c, err := nettest.NewLocalPacketListener("udp") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - cc, err := socket.NewConn(c.(net.Conn)) - if err != nil { - t.Fatal(err) - } - const N = 2048 - if err := so.SetInt(cc, N); err != nil { - t.Fatal(err) - } - n, err := so.GetInt(cc) - if err != nil { - t.Fatal(err) - } - if n < N { - t.Fatalf("got %d; want greater than or equal to %d", n, N) - } -} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go index 9a0dbcfb..dc130c27 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_posix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -34,7 +34,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte { if ip4 := ip.To4(); ip4 != nil { b := make([]byte, sizeofSockaddrInet) switch runtime.GOOS { - case "linux", "solaris", "windows": + case "android", "linux", "solaris", "windows": NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) default: b[0] = sizeofSockaddrInet @@ -47,7 +47,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte { if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { b := make([]byte, sizeofSockaddrInet6) switch runtime.GOOS { - case "linux", "solaris", "windows": + case "android", "linux", "solaris", "windows": NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) default: b[0] = sizeofSockaddrInet6 @@ -69,7 +69,7 @@ func parseInetAddr(b []byte, network string) (net.Addr, error) { } var af int switch runtime.GOOS { - case "linux", "solaris", "windows": + case "android", "linux", "solaris", "windows": af = int(NativeEndian.Uint16(b[:2])) default: af = int(b[1]) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 00000000..e2987f7d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level 
int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go index 206ea2d1..db60491f 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -26,6 +26,11 @@ type msghdr struct { Flags int32 } +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + type cmsghdr struct { Len uint32 Level int32 @@ -52,6 +57,7 @@ type sockaddrInet6 struct { const ( sizeofIovec = 0x8 sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 sizeofCmsghdr = 0xc sizeofSockaddrInet = 0x10 diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 00000000..3d6f516a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) 
+ } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) + } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 00000000..d93e699b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,316 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authention + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. 
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go deleted file mode 100644 index 685f0e7e..00000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package timeseries implements a time series structure for stats collection. -package timeseries // import "golang.org/x/net/internal/timeseries" - -import ( - "fmt" - "log" - "time" -) - -const ( - timeSeriesNumBuckets = 64 - minuteHourSeriesNumBuckets = 60 -) - -var timeSeriesResolutions = []time.Duration{ - 1 * time.Second, - 10 * time.Second, - 1 * time.Minute, - 10 * time.Minute, - 1 * time.Hour, - 6 * time.Hour, - 24 * time.Hour, // 1 day - 7 * 24 * time.Hour, // 1 week - 4 * 7 * 24 * time.Hour, // 4 weeks - 16 * 7 * 24 * time.Hour, // 16 weeks -} - -var minuteHourSeriesResolutions = []time.Duration{ - 1 * time.Second, - 1 * time.Minute, -} - -// An Observable is a kind of data that can be aggregated in a time series. -type Observable interface { - Multiply(ratio float64) // Multiplies the data in self by a given ratio - Add(other Observable) // Adds the data from a different observation to self - Clear() // Clears the observation so it can be reused. - CopyFrom(other Observable) // Copies the contents of a given observation to self -} - -// Float attaches the methods of Observable to a float64. -type Float float64 - -// NewFloat returns a Float. -func NewFloat() Observable { - f := Float(0) - return &f -} - -// String returns the float as a string. -func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } - -// Value returns the float's value. -func (f *Float) Value() float64 { return float64(*f) } - -func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } - -func (f *Float) Add(other Observable) { - o := other.(*Float) - *f += *o -} - -func (f *Float) Clear() { *f = 0 } - -func (f *Float) CopyFrom(other Observable) { - o := other.(*Float) - *f = *o -} - -// A Clock tells the current time. -type Clock interface { - Time() time.Time -} - -type defaultClock int - -var defaultClockInstance defaultClock - -func (defaultClock) Time() time.Time { return time.Now() } - -// Information kept per level. Each level consists of a circular list of -// observations. The start of the level may be derived from end and the -// len(buckets) * sizeInMillis. -type tsLevel struct { - oldest int // index to oldest bucketed Observable - newest int // index to newest bucketed Observable - end time.Time // end timestamp for this level - size time.Duration // duration of the bucketed Observable - buckets []Observable // collections of observations - provider func() Observable // used for creating new Observable -} - -func (l *tsLevel) Clear() { - l.oldest = 0 - l.newest = len(l.buckets) - 1 - l.end = time.Time{} - for i := range l.buckets { - if l.buckets[i] != nil { - l.buckets[i].Clear() - l.buckets[i] = nil - } - } -} - -func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { - l.size = size - l.provider = f - l.buckets = make([]Observable, numBuckets) -} - -// Keeps a sequence of levels. Each level is responsible for storing data at -// a given resolution. For example, the first level stores data at a one -// minute resolution while the second level stores data at a one hour -// resolution. - -// Each level is represented by a sequence of buckets. Each bucket spans an -// interval equal to the resolution of the level. New observations are added -// to the last bucket. 
-type timeSeries struct { - provider func() Observable // make more Observable - numBuckets int // number of buckets in each level - levels []*tsLevel // levels of bucketed Observable - lastAdd time.Time // time of last Observable tracked - total Observable // convenient aggregation of all Observable - clock Clock // Clock for getting current time - pending Observable // observations not yet bucketed - pendingTime time.Time // what time are we keeping in pending - dirty bool // if there are pending observations -} - -// init initializes a level according to the supplied criteria. -func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { - ts.provider = f - ts.numBuckets = numBuckets - ts.clock = clock - ts.levels = make([]*tsLevel, len(resolutions)) - - for i := range resolutions { - if i > 0 && resolutions[i-1] >= resolutions[i] { - log.Print("timeseries: resolutions must be monotonically increasing") - break - } - newLevel := new(tsLevel) - newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) - ts.levels[i] = newLevel - } - - ts.Clear() -} - -// Clear removes all observations from the time series. -func (ts *timeSeries) Clear() { - ts.lastAdd = time.Time{} - ts.total = ts.resetObservation(ts.total) - ts.pending = ts.resetObservation(ts.pending) - ts.pendingTime = time.Time{} - ts.dirty = false - - for i := range ts.levels { - ts.levels[i].Clear() - } -} - -// Add records an observation at the current time. -func (ts *timeSeries) Add(observation Observable) { - ts.AddWithTime(observation, ts.clock.Time()) -} - -// AddWithTime records an observation at the specified time. -func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { - - smallBucketDuration := ts.levels[0].size - - if t.After(ts.lastAdd) { - ts.lastAdd = t - } - - if t.After(ts.pendingTime) { - ts.advance(t) - ts.mergePendingUpdates() - ts.pendingTime = ts.levels[0].end - ts.pending.CopyFrom(observation) - ts.dirty = true - } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { - // The observation is close enough to go into the pending bucket. - // This compensates for clock skewing and small scheduling delays - // by letting the update stay in the fast path. - ts.pending.Add(observation) - ts.dirty = true - } else { - ts.mergeValue(observation, t) - } -} - -// mergeValue inserts the observation at the specified time in the past into all levels. -func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { - for _, level := range ts.levels { - index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) - if 0 <= index && index < ts.numBuckets { - bucketNumber := (level.oldest + index) % ts.numBuckets - if level.buckets[bucketNumber] == nil { - level.buckets[bucketNumber] = level.provider() - } - level.buckets[bucketNumber].Add(observation) - } - } - ts.total.Add(observation) -} - -// mergePendingUpdates applies the pending updates into all levels. -func (ts *timeSeries) mergePendingUpdates() { - if ts.dirty { - ts.mergeValue(ts.pending, ts.pendingTime) - ts.pending = ts.resetObservation(ts.pending) - ts.dirty = false - } -} - -// advance cycles the buckets at each level until the latest bucket in -// each level can hold the time specified. -func (ts *timeSeries) advance(t time.Time) { - if !t.After(ts.levels[0].end) { - return - } - for i := 0; i < len(ts.levels); i++ { - level := ts.levels[i] - if !level.end.Before(t) { - break - } - - // If the time is sufficiently far, just clear the level and advance - // directly. 
- if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { - for _, b := range level.buckets { - ts.resetObservation(b) - } - level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) - } - - for t.After(level.end) { - level.end = level.end.Add(level.size) - level.newest = level.oldest - level.oldest = (level.oldest + 1) % ts.numBuckets - ts.resetObservation(level.buckets[level.newest]) - } - - t = level.end - } -} - -// Latest returns the sum of the num latest buckets from the level. -func (ts *timeSeries) Latest(level, num int) Observable { - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - result := ts.provider() - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - if l.buckets[index] != nil { - result.Add(l.buckets[index]) - } - if index == 0 { - index = ts.numBuckets - } - index-- - } - - return result -} - -// LatestBuckets returns a copy of the num latest buckets from level. -func (ts *timeSeries) LatestBuckets(level, num int) []Observable { - if level < 0 || level > len(ts.levels) { - log.Print("timeseries: bad level argument: ", level) - return nil - } - if num < 0 || num >= ts.numBuckets { - log.Print("timeseries: bad num argument: ", num) - return nil - } - - results := make([]Observable, num) - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - result := ts.provider() - results[i] = result - if l.buckets[index] != nil { - result.CopyFrom(l.buckets[index]) - } - - if index == 0 { - index = ts.numBuckets - } - index -= 1 - } - return results -} - -// ScaleBy updates observations by scaling by factor. -func (ts *timeSeries) ScaleBy(factor float64) { - for _, l := range ts.levels { - for i := 0; i < ts.numBuckets; i++ { - l.buckets[i].Multiply(factor) - } - } - - ts.total.Multiply(factor) - ts.pending.Multiply(factor) -} - -// Range returns the sum of observations added over the specified time range. -// If start or finish times don't fall on bucket boundaries of the same -// level, then return values are approximate answers. -func (ts *timeSeries) Range(start, finish time.Time) Observable { - return ts.ComputeRange(start, finish, 1)[0] -} - -// Recent returns the sum of observations from the last delta. -func (ts *timeSeries) Recent(delta time.Duration) Observable { - now := ts.clock.Time() - return ts.Range(now.Add(-delta), now) -} - -// Total returns the total of all observations. -func (ts *timeSeries) Total() Observable { - ts.mergePendingUpdates() - return ts.total -} - -// ComputeRange computes a specified number of values into a slice using -// the observations recorded over the specified time period. The return -// values are approximate if the start or finish times don't fall on the -// bucket boundaries at the same level or if the number of buckets spanning -// the range is not an integral multiple of num. 
-func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { - if start.After(finish) { - log.Printf("timeseries: start > finish, %v>%v", start, finish) - return nil - } - - if num < 0 { - log.Printf("timeseries: num < 0, %v", num) - return nil - } - - results := make([]Observable, num) - - for _, l := range ts.levels { - if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { - ts.extract(l, start, finish, num, results) - return results - } - } - - // Failed to find a level that covers the desired range. So just - // extract from the last level, even if it doesn't cover the entire - // desired range. - ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) - - return results -} - -// RecentList returns the specified number of values in slice over the most -// recent time period of the specified range. -func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { - if delta < 0 { - return nil - } - now := ts.clock.Time() - return ts.ComputeRange(now.Add(-delta), now, num) -} - -// extract returns a slice of specified number of observations from a given -// level over a given range. -func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { - ts.mergePendingUpdates() - - srcInterval := l.size - dstInterval := finish.Sub(start) / time.Duration(num) - dstStart := start - srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) - - srcIndex := 0 - - // Where should scanning start? - if dstStart.After(srcStart) { - advance := dstStart.Sub(srcStart) / srcInterval - srcIndex += int(advance) - srcStart = srcStart.Add(advance * srcInterval) - } - - // The i'th value is computed as show below. - // interval = (finish/start)/num - // i'th value = sum of observation in range - // [ start + i * interval, - // start + (i + 1) * interval ) - for i := 0; i < num; i++ { - results[i] = ts.resetObservation(results[i]) - dstEnd := dstStart.Add(dstInterval) - for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { - srcEnd := srcStart.Add(srcInterval) - if srcEnd.After(ts.lastAdd) { - srcEnd = ts.lastAdd - } - - if !srcEnd.Before(dstStart) { - srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] - if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { - // dst completely contains src. - if srcValue != nil { - results[i].Add(srcValue) - } - } else { - // dst partially overlaps src. - overlapStart := maxTime(srcStart, dstStart) - overlapEnd := minTime(srcEnd, dstEnd) - base := srcEnd.Sub(srcStart) - fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() - - used := ts.provider() - if srcValue != nil { - used.CopyFrom(srcValue) - } - used.Multiply(fraction) - results[i].Add(used) - } - - if srcEnd.After(dstEnd) { - break - } - } - srcIndex++ - srcStart = srcStart.Add(srcInterval) - } - dstStart = dstStart.Add(dstInterval) - } -} - -// resetObservation clears the content so the struct may be reused. -func (ts *timeSeries) resetObservation(observation Observable) Observable { - if observation == nil { - observation = ts.provider() - } else { - observation.Clear() - } - return observation -} - -// TimeSeries tracks data at granularities from 1 second to 16 weeks. -type TimeSeries struct { - timeSeries -} - -// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
-func NewTimeSeries(f func() Observable) *TimeSeries { - return NewTimeSeriesWithClock(f, defaultClockInstance) -} - -// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { - ts := new(TimeSeries) - ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) - return ts -} - -// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. -type MinuteHourSeries struct { - timeSeries -} - -// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. -func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { - return NewMinuteHourSeriesWithClock(f, defaultClockInstance) -} - -// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { - ts := new(MinuteHourSeries) - ts.timeSeries.init(minuteHourSeriesResolutions, f, - minuteHourSeriesNumBuckets, clock) - return ts -} - -func (ts *MinuteHourSeries) Minute() Observable { - return ts.timeSeries.Latest(0, 60) -} - -func (ts *MinuteHourSeries) Hour() Observable { - return ts.timeSeries.Latest(1, 60) -} - -func minTime(a, b time.Time) time.Time { - if a.Before(b) { - return a - } - return b -} - -func maxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go deleted file mode 100644 index 66325a91..00000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
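The extract logic in the hunk above weights each source bucket by how much of it overlaps the requested window (the fraction computation), which is why the Range and ComputeRange doc comments describe their results as approximate. A standalone sketch of that proportional weighting; the function and variable names here are illustrative and not part of the vendored package:

package main

import (
	"fmt"
	"time"
)

// overlapFraction returns the share of a source bucket [srcStart, srcEnd)
// that falls inside the requested window [dstStart, dstEnd). This mirrors
// the proportional weighting applied in extract: a bucket's observation is
// multiplied by this fraction before being added to the result.
func overlapFraction(srcStart, srcEnd, dstStart, dstEnd time.Time) float64 {
	start := srcStart
	if dstStart.After(start) {
		start = dstStart
	}
	end := srcEnd
	if dstEnd.Before(end) {
		end = dstEnd
	}
	if !end.After(start) {
		return 0 // no overlap at all
	}
	return end.Sub(start).Seconds() / srcEnd.Sub(srcStart).Seconds()
}

func main() {
	base := time.Date(2020, 2, 26, 0, 0, 0, 0, time.UTC)
	// A 10-second bucket whose last 4 seconds fall inside the query window.
	f := overlapFraction(base, base.Add(10*time.Second), base.Add(6*time.Second), base.Add(14*time.Second))
	fmt.Printf("bucket contributes %.0f%% of its observation\n", f*100) // 40%
}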
- -package timeseries - -import ( - "math" - "testing" - "time" -) - -func isNear(x *Float, y float64, tolerance float64) bool { - return math.Abs(x.Value()-y) < tolerance -} - -func isApproximate(x *Float, y float64) bool { - return isNear(x, y, 1e-2) -} - -func checkApproximate(t *testing.T, o Observable, y float64) { - x := o.(*Float) - if !isApproximate(x, y) { - t.Errorf("Wanted %g, got %g", y, x.Value()) - } -} - -func checkNear(t *testing.T, o Observable, y, tolerance float64) { - x := o.(*Float) - if !isNear(x, y, tolerance) { - t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) - } -} - -var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) - -func tu(s int64) time.Time { - return baseTime.Add(time.Duration(s) * time.Second) -} - -func tu2(s int64, ns int64) time.Time { - return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) -} - -func TestBasicTimeSeries(t *testing.T) { - ts := NewTimeSeries(NewFloat) - fo := new(Float) - *fo = Float(10) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - checkApproximate(t, ts.Range(tu(0), tu(1)), 40) - checkApproximate(t, ts.Total(), 40) - ts.AddWithTime(fo, tu(3)) - ts.AddWithTime(fo, tu(3)) - ts.AddWithTime(fo, tu(3)) - checkApproximate(t, ts.Range(tu(0), tu(2)), 40) - checkApproximate(t, ts.Range(tu(2), tu(4)), 30) - checkApproximate(t, ts.Total(), 70) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - checkApproximate(t, ts.Range(tu(0), tu(2)), 60) - checkApproximate(t, ts.Range(tu(2), tu(4)), 30) - checkApproximate(t, ts.Total(), 90) - *fo = Float(100) - ts.AddWithTime(fo, tu(100)) - checkApproximate(t, ts.Range(tu(99), tu(100)), 100) - checkApproximate(t, ts.Range(tu(0), tu(4)), 36) - checkApproximate(t, ts.Total(), 190) - *fo = Float(10) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - checkApproximate(t, ts.Range(tu(0), tu(4)), 44) - checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) - checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) - checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) - checkApproximate(t, ts.Total(), 210) - - for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { - if i == 63 { - checkApproximate(t, l, 100) - } else { - checkApproximate(t, l, 0) - } - } - - checkApproximate(t, ts.Range(tu(0), tu(100)), 210) - checkApproximate(t, ts.Range(tu(10), tu(100)), 100) - - for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { - if i < 10 { - checkApproximate(t, l, 11) - } else if i >= 90 { - checkApproximate(t, l, 10) - } else { - checkApproximate(t, l, 0) - } - } -} - -func TestFloat(t *testing.T) { - f := Float(1) - if g, w := f.String(), "1"; g != w { - t.Errorf("Float(1).String = %q; want %q", g, w) - } - f2 := Float(2) - var o Observable = &f2 - f.Add(o) - if g, w := f.Value(), 3.0; g != w { - t.Errorf("Float post-add = %v; want %v", g, w) - } - f.Multiply(2) - if g, w := f.Value(), 6.0; g != w { - t.Errorf("Float post-multiply = %v; want %v", g, w) - } - f.Clear() - if g, w := f.Value(), 0.0; g != w { - t.Errorf("Float post-clear = %v; want %v", g, w) - } - f.CopyFrom(&f2) - if g, w := f.Value(), 2.0; g != w { - t.Errorf("Float post-CopyFrom = %v; want %v", g, w) - } -} - -type mockClock struct { - time time.Time -} - -func (m *mockClock) Time() time.Time { return m.time } -func (m *mockClock) Set(t time.Time) { m.time = t } - -const buckets = 6 - -var testResolutions = []time.Duration{ - 10 * time.Second, // level holds one minute of observations - 100 * 
time.Second, // level holds ten minutes of observations - 10 * time.Minute, // level holds one hour of observations -} - -// TestTimeSeries uses a small number of buckets to force a higher -// error rate on approximations from the timeseries. -type TestTimeSeries struct { - timeSeries -} - -func TestExpectedErrorRate(t *testing.T) { - ts := new(TestTimeSeries) - fake := new(mockClock) - fake.Set(time.Now()) - ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) - for i := 1; i <= 61*61; i++ { - fake.Set(fake.Time().Add(1 * time.Second)) - ob := Float(1) - ts.AddWithTime(&ob, fake.Time()) - - // The results should be accurate within one missing bucket (1/6) of the observations recorded. - checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) - checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) - checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) - } -} - -func min(a, b float64) float64 { - if a < b { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go index b4454992..5ce9b358 100644 --- a/vendor/golang.org/x/net/ipv4/batch.go +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -9,7 +9,6 @@ package ipv4 import ( "net" "runtime" - "syscall" "golang.org/x/net/internal/socket" ) @@ -76,7 +75,7 @@ type Message = socket.Message // headers. func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -107,7 +106,7 @@ func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will write only a single message. func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -139,7 +138,7 @@ func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will read only a single message. func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -170,7 +169,7 @@ func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will write only a single message. func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": diff --git a/vendor/golang.org/x/net/ipv4/bpf_test.go b/vendor/golang.org/x/net/ipv4/bpf_test.go deleted file mode 100644 index b44da905..00000000 --- a/vendor/golang.org/x/net/ipv4/bpf_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4_test - -import ( - "net" - "runtime" - "testing" - "time" - - "golang.org/x/net/bpf" - "golang.org/x/net/ipv4" -) - -func TestBPF(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skipf("not supported on %s", runtime.GOOS) - } - - l, err := net.ListenPacket("udp4", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - - p := ipv4.NewPacketConn(l) - - // This filter accepts UDP packets whose first payload byte is - // even. - prog, err := bpf.Assemble([]bpf.Instruction{ - // Load the first byte of the payload (skipping UDP header). 
- bpf.LoadAbsolute{Off: 8, Size: 1}, - // Select LSB of the byte. - bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, - // Byte is even? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, - // Accept. - bpf.RetConstant{Val: 4096}, - // Ignore. - bpf.RetConstant{Val: 0}, - }) - if err != nil { - t.Fatalf("compiling BPF: %s", err) - } - - if err = p.SetBPF(prog); err != nil { - t.Fatalf("attaching filter to Conn: %s", err) - } - - s, err := net.Dial("udp4", l.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - defer s.Close() - go func() { - for i := byte(0); i < 10; i++ { - s.Write([]byte{i}) - } - }() - - l.SetDeadline(time.Now().Add(2 * time.Second)) - seen := make([]bool, 5) - for { - var b [512]byte - n, _, err := l.ReadFrom(b[:]) - if err != nil { - t.Fatalf("reading from listener: %s", err) - } - if n != 1 { - t.Fatalf("unexpected packet length, want 1, got %d", n) - } - if b[0] >= 10 { - t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) - } - if b[0]%2 != 0 { - t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) - } - seen[b[0]/2] = true - - seenAll := true - for _, v := range seen { - if !v { - seenAll = false - break - } - } - if seenAll { - break - } - } -} diff --git a/vendor/golang.org/x/net/ipv4/control_test.go b/vendor/golang.org/x/net/ipv4/control_test.go deleted file mode 100644 index f87fe124..00000000 --- a/vendor/golang.org/x/net/ipv4/control_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4_test - -import ( - "testing" - - "golang.org/x/net/ipv4" -) - -func TestControlMessageParseWithFuzz(t *testing.T) { - var cm ipv4.ControlMessage - for _, fuzz := range []string{ - "\f\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00", - "\f\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00", - } { - cm.Parse([]byte(fuzz)) - } -} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go deleted file mode 100644 index c8f2e05b..00000000 --- a/vendor/golang.org/x/net/ipv4/defs_darwin.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
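The batch.go hunks a little earlier expose ReadBatch and WriteBatch on the ipv4 payload and packet handlers; per the doc comments shown there, the batched path applies on Linux while other platforms process a single message per call. A minimal sketch of driving ReadBatch; the listen address and buffer sizes are arbitrary choices for illustration:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	// Any IPv4 UDP PacketConn will do; the address here is illustrative.
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)

	// One ipv4.Message (an alias of socket.Message, per the batch.go hunk)
	// per datagram we are prepared to receive in a single call.
	ms := make([]ipv4.Message, 8)
	for i := range ms {
		ms[i].Buffers = [][]byte{make([]byte, 1500)}
	}

	// Blocks until at least one datagram arrives; n is the number of
	// messages actually filled in.
	n, err := p.ReadBatch(ms, 0)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range ms[:n] {
		log.Printf("received %d bytes from %v", m.N, m.Addr)
	}
}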
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_STRIPHDR = C.IP_STRIPHDR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go deleted file mode 100644 index f30544ea..00000000 --- a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go deleted file mode 100644 index 4dd57d86..00000000 --- a/vendor/golang.org/x/net/ipv4/defs_freebsd.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_SENDSRCADDR = C.IP_SENDSRCADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_ONESBCAST = C.IP_ONESBCAST - sysIP_BINDANY = C.IP_BINDANY - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_MINTTL = C.IP_MINTTL - sysIP_DONTFRAG = C.IP_DONTFRAG - sysIP_RECVTOS = C.IP_RECVTOS - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go deleted file mode 100644 index beb11071..00000000 --- a/vendor/golang.org/x/net/ipv4/defs_linux.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_PKTOPTIONS = C.IP_PKTOPTIONS - sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER - sysIP_RECVERR = C.IP_RECVERR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_RECVTOS = C.IP_RECVTOS - sysIP_MTU = C.IP_MTU - sysIP_FREEBIND = C.IP_FREEBIND - sysIP_TRANSPARENT = C.IP_TRANSPARENT - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR - sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR - sysIP_MINTTL = C.IP_MINTTL - sysIP_NODEFRAG = C.IP_NODEFRAG - sysIP_UNICAST_IF = C.IP_UNICAST_IF - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_MSFILTER = C.IP_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL - - //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT - //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT - //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO - //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE - //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE - //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT - - sysICMP_FILTER = C.ICMP_FILTER - - sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE - sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL - sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP - sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 - sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS - sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING - - sysSOL_SOCKET = C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPFilter = C.sizeof_struct_icmp_filter - - sizeofSockFprog = C.sizeof_struct_sock_fprog -) - -type kernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type sockExtendedErr C.struct_sock_extended_err - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type 
icmpFilter C.struct_icmp_filter - -type sockFProg C.struct_sock_fprog - -type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go deleted file mode 100644 index 8f8af1b8..00000000 --- a/vendor/golang.org/x/net/ipv4/defs_netbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go deleted file mode 100644 index 8f8af1b8..00000000 --- a/vendor/golang.org/x/net/ipv4/defs_openbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go deleted file mode 100644 index aeb33e9c..00000000 --- a/vendor/golang.org/x/net/ipv4/defs_solaris.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVSLLA = C.IP_RECVSLLA - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_NEXTHOP = C.IP_NEXTHOP - - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - sysIP_DONTFRAG = C.IP_DONTFRAG - - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC - sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL - sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF - - sysIP_REUSEADDR = C.IP_REUSEADDR - sysIP_DONTROUTE = C.IP_DONTROUTE - sysIP_BROADCAST = C.IP_BROADCAST - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type ipMreq C.struct_ip_mreq - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go index 54d77d5f..36764492 100644 --- a/vendor/golang.org/x/net/ipv4/dgramopt.go +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -6,7 +6,6 @@ package ipv4 import ( "net" - "syscall" "golang.org/x/net/bpf" ) @@ -15,7 +14,7 @@ import ( // multicast packets. func (c *dgramOpt) MulticastTTL() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoMulticastTTL] if !ok { @@ -28,7 +27,7 @@ func (c *dgramOpt) MulticastTTL() (int, error) { // outgoing multicast packets. func (c *dgramOpt) SetMulticastTTL(ttl int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastTTL] if !ok { @@ -41,7 +40,7 @@ func (c *dgramOpt) SetMulticastTTL(ttl int) error { // packet transmissions. func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -54,7 +53,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { // multicast packet transmissions. 
func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -67,7 +66,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { // should be copied and send back to the originator. func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { - return false, syscall.EINVAL + return false, errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -84,7 +83,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { // should be copied and send back to the originator. func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -104,7 +103,7 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { // configuration. func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinGroup] if !ok { @@ -122,7 +121,7 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { // source-specific group. func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveGroup] if !ok { @@ -143,7 +142,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { // routing configuration. func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { @@ -164,7 +163,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net // interface ifi. func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { @@ -186,7 +185,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne // ifi. func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { @@ -207,7 +206,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source // group by ExcludeSourceSpecificGroup again on the interface ifi. func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { @@ -228,7 +227,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source // Currently only Linux supports this. func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -241,7 +240,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { // Currently only Linux supports this. func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -255,7 +254,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { // Only supported on Linux. 
func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoAttachFilter] if !ok { diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go index b43935a5..3efa2903 100644 --- a/vendor/golang.org/x/net/ipv4/doc.go +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -55,7 +55,7 @@ // Multicasting // // The options for multicasting are available for net.UDPConn and -// net.IPconn which are created as network connections that use the +// net.IPConn which are created as network connections that use the // IPv4 transport. A few network facilities must be prepared before // you begin multicasting, at a minimum joining network interfaces and // multicast groups. @@ -241,4 +241,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv4 // import "golang.org/x/net/ipv4" -// BUG(mikio): This package is not implemented on NaCl and Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go index 2ab87736..50094637 100644 --- a/vendor/golang.org/x/net/ipv4/endpoint.go +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -6,7 +6,6 @@ package ipv4 import ( "net" - "syscall" "time" "golang.org/x/net/internal/socket" @@ -58,7 +57,7 @@ func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // SetControlMessage sets the per packet IP-level socket options. func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } @@ -67,7 +66,7 @@ func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { // endpoint. func (c *PacketConn) SetDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.SetDeadline(t) } @@ -76,7 +75,7 @@ func (c *PacketConn) SetDeadline(t time.Time) error { // endpoint. func (c *PacketConn) SetReadDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.SetReadDeadline(t) } @@ -85,7 +84,7 @@ func (c *PacketConn) SetReadDeadline(t time.Time) error { // endpoint. func (c *PacketConn) SetWriteDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.SetWriteDeadline(t) } @@ -93,7 +92,7 @@ func (c *PacketConn) SetWriteDeadline(t time.Time) error { // Close closes the endpoint. func (c *PacketConn) Close() error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.Close() } @@ -124,7 +123,7 @@ type RawConn struct { // SetControlMessage sets the per packet IP-level socket options. func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) } @@ -133,7 +132,7 @@ func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { // endpoint. func (c *RawConn) SetDeadline(t time.Time) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.SetDeadline(t) } @@ -142,7 +141,7 @@ func (c *RawConn) SetDeadline(t time.Time) error { // endpoint. 
func (c *RawConn) SetReadDeadline(t time.Time) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.SetReadDeadline(t) } @@ -151,7 +150,7 @@ func (c *RawConn) SetReadDeadline(t time.Time) error { // endpoint. func (c *RawConn) SetWriteDeadline(t time.Time) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.SetWriteDeadline(t) } @@ -159,7 +158,7 @@ func (c *RawConn) SetWriteDeadline(t time.Time) error { // Close closes the endpoint. func (c *RawConn) Close() error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.Close() } diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go deleted file mode 100644 index ddc7577e..00000000 --- a/vendor/golang.org/x/net/ipv4/example_test.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4_test - -import ( - "fmt" - "log" - "net" - "os" - "runtime" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/ipv4" -) - -func ExampleConn_markingTCP() { - ln, err := net.Listen("tcp", "0.0.0.0:1024") - if err != nil { - log.Fatal(err) - } - defer ln.Close() - - for { - c, err := ln.Accept() - if err != nil { - log.Fatal(err) - } - go func(c net.Conn) { - defer c.Close() - if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil { - p := ipv4.NewConn(c) - if err := p.SetTOS(0x28); err != nil { // DSCP AF11 - log.Fatal(err) - } - if err := p.SetTTL(128); err != nil { - log.Fatal(err) - } - } - if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { - log.Fatal(err) - } - }(c) - } -} - -func ExamplePacketConn_servingOneShotMulticastDNS() { - c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP - if err != nil { - log.Fatal(err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - - en0, err := net.InterfaceByName("en0") - if err != nil { - log.Fatal(err) - } - mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)} - if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { - log.Fatal(err) - } - defer p.LeaveGroup(en0, &mDNSLinkLocal) - if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { - log.Fatal(err) - } - - b := make([]byte, 1500) - for { - _, cm, peer, err := p.ReadFrom(b) - if err != nil { - log.Fatal(err) - } - if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) { - continue - } - answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this - if _, err := p.WriteTo(answers, nil, peer); err != nil { - log.Fatal(err) - } - } -} - -func ExamplePacketConn_tracingIPPacketRoute() { - // Tracing an IP packet route to www.google.com. 
- - const host = "www.google.com" - ips, err := net.LookupIP(host) - if err != nil { - log.Fatal(err) - } - var dst net.IPAddr - for _, ip := range ips { - if ip.To4() != nil { - dst.IP = ip - fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) - break - } - } - if dst.IP == nil { - log.Fatal("no A record found") - } - - c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 - if err != nil { - log.Fatal(err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - - if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { - log.Fatal(err) - } - wm := icmp.Message{ - Type: ipv4.ICMPTypeEcho, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, - Data: []byte("HELLO-R-U-THERE"), - }, - } - - rb := make([]byte, 1500) - for i := 1; i <= 64; i++ { // up to 64 hops - wm.Body.(*icmp.Echo).Seq = i - wb, err := wm.Marshal(nil) - if err != nil { - log.Fatal(err) - } - if err := p.SetTTL(i); err != nil { - log.Fatal(err) - } - - // In the real world usually there are several - // multiple traffic-engineered paths for each hop. - // You may need to probe a few times to each hop. - begin := time.Now() - if _, err := p.WriteTo(wb, nil, &dst); err != nil { - log.Fatal(err) - } - if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { - log.Fatal(err) - } - n, cm, peer, err := p.ReadFrom(rb) - if err != nil { - if err, ok := err.(net.Error); ok && err.Timeout() { - fmt.Printf("%v\t*\n", i) - continue - } - log.Fatal(err) - } - rm, err := icmp.ParseMessage(1, rb[:n]) - if err != nil { - log.Fatal(err) - } - rtt := time.Since(begin) - - // In the real world you need to determine whether the - // received message is yours using ControlMessage.Src, - // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq. - switch rm.Type { - case ipv4.ICMPTypeTimeExceeded: - names, _ := net.LookupAddr(peer.String()) - fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) - case ipv4.ICMPTypeEchoReply: - names, _ := net.LookupAddr(peer.String()) - fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) - return - default: - log.Printf("unknown ICMP message: %+v\n", rm) - } - } -} - -func ExampleRawConn_advertisingOSPFHello() { - c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 - if err != nil { - log.Fatal(err) - } - defer c.Close() - r, err := ipv4.NewRawConn(c) - if err != nil { - log.Fatal(err) - } - - en0, err := net.InterfaceByName("en0") - if err != nil { - log.Fatal(err) - } - allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)} - if err := r.JoinGroup(en0, &allSPFRouters); err != nil { - log.Fatal(err) - } - defer r.LeaveGroup(en0, &allSPFRouters) - - hello := make([]byte, 24) // fake hello data, you need to implement this - ospf := make([]byte, 24) // fake ospf header, you need to implement this - ospf[0] = 2 // version 2 - ospf[1] = 1 // hello packet - ospf = append(ospf, hello...) 
- iph := &ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TOS: 0xc0, // DSCP CS6 - TotalLen: ipv4.HeaderLen + len(ospf), - TTL: 1, - Protocol: 89, - Dst: allSPFRouters.IP.To4(), - } - - var cm *ipv4.ControlMessage - switch runtime.GOOS { - case "darwin", "linux": - cm = &ipv4.ControlMessage{IfIndex: en0.Index} - default: - if err := r.SetMulticastInterface(en0); err != nil { - log.Fatal(err) - } - } - if err := r.WriteTo(iph, ospf, cm); err != nil { - log.Fatal(err) - } -} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go deleted file mode 100644 index ffb44fe6..00000000 --- a/vendor/golang.org/x/net/ipv4/gen.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", - parseICMPv4Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") - fmt.Fprintf(&bb, "package ipv4\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv4Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var icp icmpv4Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigDescr) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - 
fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) - } - fmt.Fprintf(w, "}\n") - return nil -} - -type icmpv4Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv4ParamRecord struct { - OrigDescr string - Descr string - Value int -} - -func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { - id := -1 - for i, r := range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Descr, "Reserved") || - strings.Contains(pr.Descr, "Unassigned") || - strings.Contains(pr.Descr, "Deprecated") || - strings.Contains(pr.Descr, "Experiment") || - strings.Contains(pr.Descr, "experiment") { - continue - } - ss := strings.Split(pr.Descr, "\n") - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - s := strings.TrimSpace(prs[i].Descr) - prs[i].OrigDescr = s - prs[i].Descr = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go index 119bf841..587ae4a1 100644 --- a/vendor/golang.org/x/net/ipv4/genericopt.go +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -4,12 +4,10 @@ package ipv4 -import "syscall" - // TOS returns the type-of-service field value for outgoing packets. func (c *genericOpt) TOS() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoTOS] if !ok { @@ -22,7 +20,7 @@ func (c *genericOpt) TOS() (int, error) { // packets. func (c *genericOpt) SetTOS(tos int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoTOS] if !ok { @@ -34,7 +32,7 @@ func (c *genericOpt) SetTOS(tos int) error { // TTL returns the time-to-live field value for outgoing packets. func (c *genericOpt) TTL() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoTTL] if !ok { @@ -47,7 +45,7 @@ func (c *genericOpt) TTL() (int, error) { // packets. func (c *genericOpt) SetTTL(ttl int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoTTL] if !ok { diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go index 8bb0f0f4..a8c8f7a6 100644 --- a/vendor/golang.org/x/net/ipv4/header.go +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -9,7 +9,6 @@ import ( "fmt" "net" "runtime" - "syscall" "golang.org/x/net/internal/socket" ) @@ -52,9 +51,13 @@ func (h *Header) String() string { } // Marshal returns the binary encoding of h. +// +// The returned slice is in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. 
func (h *Header) Marshal() ([]byte, error) { if h == nil { - return nil, syscall.EINVAL + return nil, errInvalidConn } if h.Len < HeaderLen { return nil, errHeaderTooShort @@ -98,7 +101,11 @@ func (h *Header) Marshal() ([]byte, error) { return b, nil } -// Parse parses b as an IPv4 header and sotres the result in h. +// Parse parses b as an IPv4 header and stores the result in h. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. func (h *Header) Parse(b []byte) error { if h == nil || len(b) < HeaderLen { return errHeaderTooShort @@ -150,6 +157,10 @@ func (h *Header) Parse(b []byte) error { } // ParseHeader parses b as an IPv4 header. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. func ParseHeader(b []byte) (*Header, error) { h := new(Header) if err := h.Parse(b); err != nil { diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go deleted file mode 100644 index a246aeea..00000000 --- a/vendor/golang.org/x/net/ipv4/header_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4 - -import ( - "bytes" - "encoding/binary" - "net" - "reflect" - "runtime" - "strings" - "testing" - - "golang.org/x/net/internal/socket" -) - -type headerTest struct { - wireHeaderFromKernel []byte - wireHeaderToKernel []byte - wireHeaderFromTradBSDKernel []byte - wireHeaderToTradBSDKernel []byte - wireHeaderFromFreeBSD10Kernel []byte - wireHeaderToFreeBSD10Kernel []byte - *Header -} - -var headerLittleEndianTests = []headerTest{ - // TODO(mikio): Add platform dependent wire header formats when - // we support new platforms. 
- { - wireHeaderFromKernel: []byte{ - 0x45, 0x01, 0xbe, 0xef, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - wireHeaderToKernel: []byte{ - 0x45, 0x01, 0xbe, 0xef, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - wireHeaderFromTradBSDKernel: []byte{ - 0x45, 0x01, 0xdb, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - wireHeaderToTradBSDKernel: []byte{ - 0x45, 0x01, 0xef, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - wireHeaderFromFreeBSD10Kernel: []byte{ - 0x45, 0x01, 0xef, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - wireHeaderToFreeBSD10Kernel: []byte{ - 0x45, 0x01, 0xef, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - Header: &Header{ - Version: Version, - Len: HeaderLen, - TOS: 1, - TotalLen: 0xbeef, - ID: 0xcafe, - Flags: DontFragment, - FragOff: 1500, - TTL: 255, - Protocol: 1, - Checksum: 0xdead, - Src: net.IPv4(172, 16, 254, 254), - Dst: net.IPv4(192, 168, 0, 1), - }, - }, - - // with option headers - { - wireHeaderFromKernel: []byte{ - 0x46, 0x01, 0xbe, 0xf3, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - 0xff, 0xfe, 0xfe, 0xff, - }, - wireHeaderToKernel: []byte{ - 0x46, 0x01, 0xbe, 0xf3, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - 0xff, 0xfe, 0xfe, 0xff, - }, - wireHeaderFromTradBSDKernel: []byte{ - 0x46, 0x01, 0xdb, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - 0xff, 0xfe, 0xfe, 0xff, - }, - wireHeaderToTradBSDKernel: []byte{ - 0x46, 0x01, 0xf3, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - 0xff, 0xfe, 0xfe, 0xff, - }, - wireHeaderFromFreeBSD10Kernel: []byte{ - 0x46, 0x01, 0xf3, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - 0xff, 0xfe, 0xfe, 0xff, - }, - wireHeaderToFreeBSD10Kernel: []byte{ - 0x46, 0x01, 0xf3, 0xbe, - 0xca, 0xfe, 0xdc, 0x45, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - 0xff, 0xfe, 0xfe, 0xff, - }, - Header: &Header{ - Version: Version, - Len: HeaderLen + 4, - TOS: 1, - TotalLen: 0xbef3, - ID: 0xcafe, - Flags: DontFragment, - FragOff: 1500, - TTL: 255, - Protocol: 1, - Checksum: 0xdead, - Src: net.IPv4(172, 16, 254, 254), - Dst: net.IPv4(192, 168, 0, 1), - Options: []byte{0xff, 0xfe, 0xfe, 0xff}, - }, - }, -} - -func TestMarshalHeader(t *testing.T) { - if socket.NativeEndian != binary.LittleEndian { - t.Skip("no test for non-little endian machine yet") - } - - for _, tt := range headerLittleEndianTests { - b, err := tt.Header.Marshal() - if err != nil { - t.Fatal(err) - } - var wh []byte - switch runtime.GOOS { - case "darwin", "dragonfly", "netbsd": - wh = tt.wireHeaderToTradBSDKernel - case "freebsd": - switch { - case freebsdVersion < 1000000: - wh = tt.wireHeaderToTradBSDKernel - case 1000000 <= freebsdVersion && freebsdVersion < 1100000: - wh = tt.wireHeaderToFreeBSD10Kernel - default: - wh = tt.wireHeaderToKernel - } - default: - wh = tt.wireHeaderToKernel - } - if !bytes.Equal(b, wh) { - t.Fatalf("got %#v; want %#v", b, wh) - } - } -} - -func TestParseHeader(t *testing.T) { - if socket.NativeEndian != binary.LittleEndian { - t.Skip("no test for big endian machine 
yet") - } - - for _, tt := range headerLittleEndianTests { - var wh []byte - switch runtime.GOOS { - case "darwin", "dragonfly", "netbsd": - wh = tt.wireHeaderFromTradBSDKernel - case "freebsd": - switch { - case freebsdVersion < 1000000: - wh = tt.wireHeaderFromTradBSDKernel - case 1000000 <= freebsdVersion && freebsdVersion < 1100000: - wh = tt.wireHeaderFromFreeBSD10Kernel - default: - wh = tt.wireHeaderFromKernel - } - default: - wh = tt.wireHeaderFromKernel - } - h, err := ParseHeader(wh) - if err != nil { - t.Fatal(err) - } - if err := h.Parse(wh); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(h, tt.Header) { - t.Fatalf("got %#v; want %#v", h, tt.Header) - } - s := h.String() - if strings.Contains(s, ",") { - t.Fatalf("should be space-separated values: %s", s) - } - } -} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go index a5052e32..8d8ff98e 100644 --- a/vendor/golang.org/x/net/ipv4/helper.go +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -10,6 +10,7 @@ import ( ) var ( + errInvalidConn = errors.New("invalid connection") errMissingAddress = errors.New("missing address") errMissingHeader = errors.New("missing header") errHeaderTooShort = errors.New("header too short") diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go index be10c948..4375b409 100644 --- a/vendor/golang.org/x/net/ipv4/iana.go +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -1,9 +1,9 @@ // go generate gen.go -// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. package ipv4 -// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 const ( ICMPTypeEchoReply ICMPType = 0 // Echo Reply ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable @@ -16,9 +16,11 @@ const ( ICMPTypeTimestamp ICMPType = 13 // Timestamp ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply ICMPTypePhoturis ICMPType = 40 // Photuris + ICMPTypeExtendedEchoRequest ICMPType = 42 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 43 // Extended Echo Reply ) -// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 var icmpTypes = map[ICMPType]string{ 0: "echo reply", 3: "destination unreachable", @@ -31,4 +33,6 @@ var icmpTypes = map[ICMPType]string{ 13: "timestamp", 14: "timestamp reply", 40: "photuris", + 42: "extended echo request", + 43: "extended echo reply", } diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go deleted file mode 100644 index 3324b54d..00000000 --- a/vendor/golang.org/x/net/ipv4/icmp_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4_test - -import ( - "net" - "reflect" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -var icmpStringTests = []struct { - in ipv4.ICMPType - out string -}{ - {ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"}, - - {256, ""}, -} - -func TestICMPString(t *testing.T) { - for _, tt := range icmpStringTests { - s := tt.in.String() - if s != tt.out { - t.Errorf("got %s; want %s", s, tt.out) - } - } -} - -func TestICMPFilter(t *testing.T) { - switch runtime.GOOS { - case "linux": - default: - t.Skipf("not supported on %s", runtime.GOOS) - } - - var f ipv4.ICMPFilter - for _, toggle := range []bool{false, true} { - f.SetAll(toggle) - for _, typ := range []ipv4.ICMPType{ - ipv4.ICMPTypeDestinationUnreachable, - ipv4.ICMPTypeEchoReply, - ipv4.ICMPTypeTimeExceeded, - ipv4.ICMPTypeParameterProblem, - } { - f.Accept(typ) - if f.WillBlock(typ) { - t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ) - } - f.Block(typ) - if !f.WillBlock(typ) { - t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ) - } - } - } -} - -func TestSetICMPFilter(t *testing.T) { - switch runtime.GOOS { - case "linux": - default: - t.Skipf("not supported on %s", runtime.GOOS) - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - p := ipv4.NewPacketConn(c) - - var f ipv4.ICMPFilter - f.SetAll(true) - f.Accept(ipv4.ICMPTypeEcho) - f.Accept(ipv4.ICMPTypeEchoReply) - if err := p.SetICMPFilter(&f); err != nil { - t.Fatal(err) - } - kf, err := p.ICMPFilter() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(kf, &f) { - t.Fatalf("got %#v; want %#v", kf, f) - } -} diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go deleted file mode 100644 index bcf49736..00000000 --- a/vendor/golang.org/x/net/ipv4/multicast_test.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
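The regenerated iana.go a little earlier picks up the Extended Echo request/reply pair (ICMP types 42 and 43, the RFC 8335 PROBE messages) alongside the existing entries. A small sketch feeding those constants into the ICMPFilter accept/block API exercised by the deleted test above; which types to accept is an arbitrary choice here, and installing the filter with SetICMPFilter is, as the package docs note, currently Linux-only:

package main

import (
	"fmt"

	"golang.org/x/net/ipv4"
)

func main() {
	var f ipv4.ICMPFilter
	f.SetAll(true) // start out blocking every ICMP type

	// Punch holes only for classic echo replies and the new extended
	// echo replies added by the regenerated tables.
	f.Accept(ipv4.ICMPTypeEchoReply)
	f.Accept(ipv4.ICMPTypeExtendedEchoReply)

	for _, typ := range []ipv4.ICMPType{
		ipv4.ICMPTypeEchoReply,
		ipv4.ICMPTypeExtendedEchoReply,
		ipv4.ICMPTypeDestinationUnreachable,
	} {
		fmt.Printf("%v blocked: %t\n", typ, f.WillBlock(typ))
	}
	// The filter would then be installed on a raw ICMP connection via
	// (*ipv4.PacketConn).SetICMPFilter.
}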
- -package ipv4_test - -import ( - "bytes" - "net" - "os" - "runtime" - "testing" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -var packetConnReadWriteMulticastUDPTests = []struct { - addr string - grp, src *net.UDPAddr -}{ - {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 - - {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 -} - -func TestPacketConnReadWriteMulticastUDP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - for _, tt := range packetConnReadWriteMulticastUDPTests { - c, err := net.ListenPacket("udp4", tt.addr) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - grp := *tt.grp - grp.Port = c.LocalAddr().(*net.UDPAddr).Port - p := ipv4.NewPacketConn(c) - defer p.Close() - if tt.src == nil { - if err := p.JoinGroup(ifi, &grp); err != nil { - t.Fatal(err) - } - defer p.LeaveGroup(ifi, &grp) - } else { - if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { - switch runtime.GOOS { - case "freebsd", "linux": - default: // platforms that don't support IGMPv2/3 fail here - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) - } - if err := p.SetMulticastInterface(ifi); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastInterface(); err != nil { - t.Fatal(err) - } - if err := p.SetMulticastLoopback(true); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastLoopback(); err != nil { - t.Fatal(err) - } - cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface - wb := []byte("HELLO-R-U-THERE") - - for i, toggle := range []bool{true, false, true} { - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { - t.Fatal(err) - } - p.SetMulticastTTL(i + 1) - if n, err := p.WriteTo(wb, nil, &grp); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatalf("got %v; want %v", n, len(wb)) - } - rb := make([]byte, 128) - if n, _, _, err := p.ReadFrom(rb); err != nil { - t.Fatal(err) - } else if !bytes.Equal(rb[:n], wb) { - t.Fatalf("got %v; want %v", rb[:n], wb) - } - } - } -} - -var packetConnReadWriteMulticastICMPTests = []struct { - grp, src *net.IPAddr -}{ - {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 - - {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 -} - -func TestPacketConnReadWriteMulticastICMP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "solaris", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - for _, tt := range packetConnReadWriteMulticastICMPTests { - c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - p := ipv4.NewPacketConn(c) - defer p.Close() - 
if tt.src == nil { - if err := p.JoinGroup(ifi, tt.grp); err != nil { - t.Fatal(err) - } - defer p.LeaveGroup(ifi, tt.grp) - } else { - if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { - switch runtime.GOOS { - case "freebsd", "linux": - default: // platforms that don't support IGMPv2/3 fail here - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) - } - if err := p.SetMulticastInterface(ifi); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastInterface(); err != nil { - t.Fatal(err) - } - if err := p.SetMulticastLoopback(true); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastLoopback(); err != nil { - t.Fatal(err) - } - cf := ipv4.FlagDst | ipv4.FlagInterface - if runtime.GOOS != "solaris" { - // Solaris never allows to modify ICMP properties. - cf |= ipv4.FlagTTL - } - - for i, toggle := range []bool{true, false, true} { - wb, err := (&icmp.Message{ - Type: ipv4.ICMPTypeEcho, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: i + 1, - Data: []byte("HELLO-R-U-THERE"), - }, - }).Marshal(nil) - if err != nil { - t.Fatal(err) - } - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { - t.Fatal(err) - } - p.SetMulticastTTL(i + 1) - if n, err := p.WriteTo(wb, nil, tt.grp); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatalf("got %v; want %v", n, len(wb)) - } - rb := make([]byte, 128) - if n, _, _, err := p.ReadFrom(rb); err != nil { - t.Fatal(err) - } else { - m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) - if err != nil { - t.Fatal(err) - } - switch { - case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 - case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 - default: - t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) - } - } - } - } -} - -var rawConnReadWriteMulticastICMPTests = []struct { - grp, src *net.IPAddr -}{ - {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 - - {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 -} - -func TestRawConnReadWriteMulticastICMP(t *testing.T) { - if testing.Short() { - t.Skip("to avoid external network") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - for _, tt := range rawConnReadWriteMulticastICMPTests { - c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - r, err := ipv4.NewRawConn(c) - if err != nil { - t.Fatal(err) - } - defer r.Close() - if tt.src == nil { - if err := r.JoinGroup(ifi, tt.grp); err != nil { - t.Fatal(err) - } - defer r.LeaveGroup(ifi, tt.grp) - } else { - if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { - switch runtime.GOOS { - case "freebsd", "linux": - default: // platforms that don't support IGMPv2/3 fail here - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) - } - if err := r.SetMulticastInterface(ifi); err != nil { - t.Fatal(err) - } - if _, err := 
r.MulticastInterface(); err != nil { - t.Fatal(err) - } - if err := r.SetMulticastLoopback(true); err != nil { - t.Fatal(err) - } - if _, err := r.MulticastLoopback(); err != nil { - t.Fatal(err) - } - cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface - - for i, toggle := range []bool{true, false, true} { - wb, err := (&icmp.Message{ - Type: ipv4.ICMPTypeEcho, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: i + 1, - Data: []byte("HELLO-R-U-THERE"), - }, - }).Marshal(nil) - if err != nil { - t.Fatal(err) - } - wh := &ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TOS: i + 1, - TotalLen: ipv4.HeaderLen + len(wb), - Protocol: 1, - Dst: tt.grp.IP, - } - if err := r.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { - t.Fatal(err) - } - r.SetMulticastTTL(i + 1) - if err := r.WriteTo(wh, wb, nil); err != nil { - t.Fatal(err) - } - rb := make([]byte, ipv4.HeaderLen+128) - if rh, b, _, err := r.ReadFrom(rb); err != nil { - t.Fatal(err) - } else { - m, err := icmp.ParseMessage(iana.ProtocolICMP, b) - if err != nil { - t.Fatal(err) - } - switch { - case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 - case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 - default: - t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) - } - } - } - } -} diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go deleted file mode 100644 index e43fbbe0..00000000 --- a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4_test - -import ( - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -var udpMultipleGroupListenerTests = []net.Addr{ - &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727 - &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}, - &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, -} - -func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if testing.Short() { - t.Skip("to avoid external network") - } - - for _, gaddr := range udpMultipleGroupListenerTests { - c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port - if err != nil { - t.Fatal(err) - } - defer c.Close() - - p := ipv4.NewPacketConn(c) - var mift []*net.Interface - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { - continue - } - if err := p.JoinGroup(&ifi, gaddr); err != nil { - t.Fatal(err) - } - mift = append(mift, &ift[i]) - } - for _, ifi := range mift { - if err := p.LeaveGroup(ifi, gaddr); err != nil { - t.Fatal(err) - } - } - } -} - -func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if testing.Short() { - t.Skip("to avoid external network") - } - - for _, gaddr := range udpMultipleGroupListenerTests { - c1, err := net.ListenPacket("udp4", "224.0.0.0:0") // wildcard address with reusable port - if err != nil { - t.Fatal(err) - } - defer c1.Close() - _, port, err := net.SplitHostPort(c1.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - c2, err := net.ListenPacket("udp4", net.JoinHostPort("224.0.0.0", port)) // wildcard address with reusable port - if err != nil { - t.Fatal(err) - } - defer c2.Close() - - var ps [2]*ipv4.PacketConn - ps[0] = ipv4.NewPacketConn(c1) - ps[1] = ipv4.NewPacketConn(c2) - var mift []*net.Interface - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { - continue - } - for _, p := range ps { - if err := p.JoinGroup(&ifi, gaddr); err != nil { - t.Fatal(err) - } - } - mift = append(mift, &ift[i]) - } - for _, ifi := range mift { - for _, p := range ps { - if err := p.LeaveGroup(ifi, gaddr); err != nil { - t.Fatal(err) - } - } - } - } -} - -func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if testing.Short() { - t.Skip("to avoid external network") - } - - gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 - type ml struct { - c *ipv4.PacketConn - ifi *net.Interface - } - var mlt []*ml - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - port := "0" - for i, ifi := range ift { - ip, ok := nettest.IsMulticastCapable("ip4", &ifi) - if !ok { - continue - } - c, err := net.ListenPacket("udp4", net.JoinHostPort(ip.String(), port)) // unicast address with non-reusable port - if err != nil { - // The listen may fail when the serivce is - // already in use, but it's fine because the - // purpose of this is not to test the - // bookkeeping of IP control block inside the - // kernel. 
- t.Log(err) - continue - } - defer c.Close() - if port == "0" { - _, port, err = net.SplitHostPort(c.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - } - p := ipv4.NewPacketConn(c) - if err := p.JoinGroup(&ifi, &gaddr); err != nil { - t.Fatal(err) - } - mlt = append(mlt, &ml{p, &ift[i]}) - } - for _, m := range mlt { - if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { - t.Fatal(err) - } - } -} - -func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if testing.Short() { - t.Skip("to avoid external network") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address - if err != nil { - t.Fatal(err) - } - defer c.Close() - - r, err := ipv4.NewRawConn(c) - if err != nil { - t.Fatal(err) - } - gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 - var mift []*net.Interface - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { - continue - } - if err := r.JoinGroup(&ifi, &gaddr); err != nil { - t.Fatal(err) - } - mift = append(mift, &ift[i]) - } - for _, ifi := range mift { - if err := r.LeaveGroup(ifi, &gaddr); err != nil { - t.Fatal(err) - } - } -} - -func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if testing.Short() { - t.Skip("to avoid external network") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 - type ml struct { - c *ipv4.RawConn - ifi *net.Interface - } - var mlt []*ml - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - ip, ok := nettest.IsMulticastCapable("ip4", &ifi) - if !ok { - continue - } - c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address - if err != nil { - t.Fatal(err) - } - defer c.Close() - r, err := ipv4.NewRawConn(c) - if err != nil { - t.Fatal(err) - } - if err := r.JoinGroup(&ifi, &gaddr); err != nil { - t.Fatal(err) - } - mlt = append(mlt, &ml{r, &ift[i]}) - } - for _, m := range mlt { - if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go deleted file mode 100644 index f7efac24..00000000 --- a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4_test - -import ( - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -var packetConnMulticastSocketOptionTests = []struct { - net, proto, addr string - grp, src net.Addr -}{ - {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727 - {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 - - {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 - {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 -} - -func TestPacketConnMulticastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9": - t.Skipf("not supported on %s", runtime.GOOS) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - m, ok := nettest.SupportsRawIPSocket() - for _, tt := range packetConnMulticastSocketOptionTests { - if tt.net == "ip4" && !ok { - t.Log(m) - continue - } - c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - defer p.Close() - - if tt.src == nil { - testMulticastSocketOptions(t, p, ifi, tt.grp) - } else { - testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) - } - } -} - -var rawConnMulticastSocketOptionTests = []struct { - grp, src net.Addr -}{ - {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 - - {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 -} - -func TestRawConnMulticastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9": - t.Skipf("not supported on %s", runtime.GOOS) - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - for _, tt := range rawConnMulticastSocketOptionTests { - c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") - if err != nil { - t.Fatal(err) - } - defer c.Close() - r, err := ipv4.NewRawConn(c) - if err != nil { - t.Fatal(err) - } - defer r.Close() - - if tt.src == nil { - testMulticastSocketOptions(t, r, ifi, tt.grp) - } else { - testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src) - } - } -} - -type testIPv4MulticastConn interface { - MulticastTTL() (int, error) - SetMulticastTTL(ttl int) error - MulticastLoopback() (bool, error) - SetMulticastLoopback(bool) error - JoinGroup(*net.Interface, net.Addr) error - LeaveGroup(*net.Interface, net.Addr) error - JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error - LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error - ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error - IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error -} - -func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) { - const ttl = 255 - if err := c.SetMulticastTTL(ttl); err != nil { - t.Error(err) - return - } - if v, err := c.MulticastTTL(); err != nil { - t.Error(err) - return - } else if v != ttl { - t.Errorf("got %v; want %v", v, ttl) - return - } - - for _, toggle := range []bool{true, false} { - if err := c.SetMulticastLoopback(toggle); err != nil { - 
t.Error(err) - return - } - if v, err := c.MulticastLoopback(); err != nil { - t.Error(err) - return - } else if v != toggle { - t.Errorf("got %v; want %v", v, toggle) - return - } - } - - if err := c.JoinGroup(ifi, grp); err != nil { - t.Error(err) - return - } - if err := c.LeaveGroup(ifi, grp); err != nil { - t.Error(err) - return - } -} - -func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) { - // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP - if err := c.JoinGroup(ifi, grp); err != nil { - t.Error(err) - return - } - if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { - switch runtime.GOOS { - case "freebsd", "linux": - default: // platforms that don't support IGMPv2/3 fail here - t.Logf("not supported on %s", runtime.GOOS) - return - } - t.Error(err) - return - } - if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - if err := c.LeaveGroup(ifi, grp); err != nil { - t.Error(err) - return - } - - // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP - if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - - // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP - if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - if err := c.LeaveGroup(ifi, grp); err != nil { - t.Error(err) - return - } -} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go index f00f5b05..966bb776 100644 --- a/vendor/golang.org/x/net/ipv4/packet.go +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -6,7 +6,6 @@ package ipv4 import ( "net" - "syscall" "golang.org/x/net/internal/socket" ) @@ -28,7 +27,7 @@ func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn // header h, the payload p and the control message cm. func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { if !c.ok() { - return nil, nil, nil, syscall.EINVAL + return nil, nil, nil, errInvalidConn } return c.readFrom(b) } @@ -63,7 +62,7 @@ func slicePacket(b []byte) (h, p []byte, err error) { // Options = optional func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } return c.writeTo(h, p, cm) } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go index 3f06d760..204a49fe 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv4 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv4 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } return c.readFrom(b) } @@ -30,7 +27,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // control of the outgoing datagram is not required. 
func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } return c.writeTo(b, cm, dst) } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go index d26ccd90..8d45599f 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build !go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go index 2f193118..4081aad8 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index 3926de70..1d434c61 100644 --- a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 windows +// +build js nacl plan9 windows package ipv4 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv4 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } if n, src, err = c.PacketConn.ReadFrom(b); err != nil { return 0, nil, nil, err @@ -33,7 +30,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } if dst == nil { return 0, errMissingAddress diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go deleted file mode 100644 index 1cd926e7..00000000 --- a/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 - -package ipv4_test - -import ( - "bytes" - "fmt" - "net" - "runtime" - "strings" - "sync" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - b.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph, err := (&ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TotalLen: ipv4.HeaderLen + len(payload), - TTL: 1, - Protocol: iana.ProtocolReserved, - Src: net.IPv4(192, 0, 2, 1), - Dst: net.IPv4(192, 0, 2, 254), - }).Marshal() - if err != nil { - b.Fatal(err) - } - greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) - bb := make([]byte, 128) - cm := ipv4.ControlMessage{ - Src: net.IPv4(127, 0, 0, 1), - } - if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { - cm.IfIndex = ifi.Index - } - - b.Run("UDP", func(b *testing.B) { - c, err := nettest.NewLocalPacketListener("udp4") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv4.FlagTTL | ipv4.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(payload, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(payload, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - }) - b.Run("IP", func(b *testing.B) { - switch runtime.GOOS { - case "netbsd": - b.Skip("need to configure gre on netbsd") - case "openbsd": - b.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv4.FlagTTL | ipv4.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(datagram, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(datagram, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - }) -} - -func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph, err := (&ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TotalLen: ipv4.HeaderLen + len(payload), - TTL: 1, - Protocol: iana.ProtocolReserved, - Src: net.IPv4(192, 0, 2, 1), - Dst: net.IPv4(192, 0, 2, 254), - }).Marshal() - if err != nil { - t.Fatal(err) - } - greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) 
- - t.Run("UDP", func(t *testing.T) { - c, err := nettest.NewLocalPacketListener("udp4") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) - }) - }) - t.Run("IP", func(t *testing.T) { - switch runtime.GOOS { - case "netbsd": - t.Skip("need to configure gre on netbsd") - case "openbsd": - t.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) - }) - }) -} - -func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr) { - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface - - if err := p.SetControlMessage(cf, true); err != nil { // probe before test - if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) - } - t.Fatal(err) - } - - var wg sync.WaitGroup - reader := func() { - defer wg.Done() - b := make([]byte, 128) - n, cm, _, err := p.ReadFrom(b) - if err != nil { - t.Error(err) - return - } - if !bytes.Equal(b[:n], data) { - t.Errorf("got %#v; want %#v", b[:n], data) - return - } - s := cm.String() - if strings.Contains(s, ",") { - t.Errorf("should be space-separated values: %s", s) - return - } - } - writer := func(toggle bool) { - defer wg.Done() - cm := ipv4.ControlMessage{ - Src: net.IPv4(127, 0, 0, 1), - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - n, err := p.WriteTo(data, &cm, dst) - if err != nil { - t.Error(err) - return - } - if n != len(data) { - t.Errorf("got %d; want %d", n, len(data)) - return - } - } - - const N = 10 - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Add(2 * N) - for i := 0; i < 2*N; i++ { - go writer(i%2 != 0) - - } - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go deleted file mode 100644 index 365de022..00000000 --- a/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 - -package ipv4_test - -import ( - "bytes" - "fmt" - "net" - "runtime" - "strings" - "sync" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - b.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph, err := (&ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TotalLen: ipv4.HeaderLen + len(payload), - TTL: 1, - Protocol: iana.ProtocolReserved, - Src: net.IPv4(192, 0, 2, 1), - Dst: net.IPv4(192, 0, 2, 254), - }).Marshal() - if err != nil { - b.Fatal(err) - } - greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) - bb := make([]byte, 128) - cm := ipv4.ControlMessage{ - Src: net.IPv4(127, 0, 0, 1), - } - if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { - cm.IfIndex = ifi.Index - } - - b.Run("UDP", func(b *testing.B) { - c, err := nettest.NewLocalPacketListener("udp4") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv4.FlagTTL | ipv4.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - wms := []ipv4.Message{ - { - Buffers: [][]byte{payload}, - Addr: dst, - OOB: cm.Marshal(), - }, - } - rms := []ipv4.Message{ - { - Buffers: [][]byte{bb}, - OOB: ipv4.NewControlMessage(cf), - }, - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(payload, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(payload, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("Batch", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteBatch(wms, 0); err != nil { - b.Fatal(err) - } - if _, err := p.ReadBatch(rms, 0); err != nil { - b.Fatal(err) - } - } - }) - }) - b.Run("IP", func(b *testing.B) { - switch runtime.GOOS { - case "netbsd": - b.Skip("need to configure gre on netbsd") - case "openbsd": - b.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv4.FlagTTL | ipv4.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - wms := []ipv4.Message{ - { - Buffers: [][]byte{datagram}, - Addr: dst, - OOB: cm.Marshal(), - }, - } - rms := []ipv4.Message{ - { - Buffers: [][]byte{bb}, - OOB: ipv4.NewControlMessage(cf), - }, - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(datagram, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(datagram, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("Batch", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err 
:= p.WriteBatch(wms, 0); err != nil { - b.Fatal(err) - } - if _, err := p.ReadBatch(rms, 0); err != nil { - b.Fatal(err) - } - } - }) - }) -} - -func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph, err := (&ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TotalLen: ipv4.HeaderLen + len(payload), - TTL: 1, - Protocol: iana.ProtocolReserved, - Src: net.IPv4(192, 0, 2, 1), - Dst: net.IPv4(192, 0, 2, 254), - }).Marshal() - if err != nil { - t.Fatal(err) - } - greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) - - t.Run("UDP", func(t *testing.T) { - c, err := nettest.NewLocalPacketListener("udp4") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) - }) - t.Run("Batch", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) - }) - }) - t.Run("IP", func(t *testing.T) { - switch runtime.GOOS { - case "netbsd": - t.Skip("need to configure gre on netbsd") - case "openbsd": - t.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) - }) - t.Run("Batch", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) - }) - }) -} - -func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr, batch bool) { - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface - - if err := p.SetControlMessage(cf, true); err != nil { // probe before test - if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) - } - t.Fatal(err) - } - - var wg sync.WaitGroup - reader := func() { - defer wg.Done() - b := make([]byte, 128) - n, cm, _, err := p.ReadFrom(b) - if err != nil { - t.Error(err) - return - } - if !bytes.Equal(b[:n], data) { - t.Errorf("got %#v; want %#v", b[:n], data) - return - } - s := cm.String() - if strings.Contains(s, ",") { - t.Errorf("should be space-separated values: %s", s) - return - } - } - batchReader := func() { - defer wg.Done() - ms := []ipv4.Message{ - { - Buffers: [][]byte{make([]byte, 128)}, - OOB: ipv4.NewControlMessage(cf), - }, - } - n, err := p.ReadBatch(ms, 0) - if err != nil { - t.Error(err) - return - } - if n != len(ms) { - t.Errorf("got %d; want %d", n, len(ms)) - return - } - var cm ipv4.ControlMessage - if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { - t.Error(err) - return - } - var b []byte - if _, ok := dst.(*net.IPAddr); ok { - var h ipv4.Header - if err := h.Parse(ms[0].Buffers[0][:ms[0].N]); err != nil { - t.Error(err) - return - } - b = ms[0].Buffers[0][h.Len:ms[0].N] - } else { - b = ms[0].Buffers[0][:ms[0].N] - } - if !bytes.Equal(b, data) { - t.Errorf("got %#v; want %#v", b, data) - return - } - s := cm.String() - if 
strings.Contains(s, ",") { - t.Errorf("should be space-separated values: %s", s) - return - } - } - writer := func(toggle bool) { - defer wg.Done() - cm := ipv4.ControlMessage{ - Src: net.IPv4(127, 0, 0, 1), - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - n, err := p.WriteTo(data, &cm, dst) - if err != nil { - t.Error(err) - return - } - if n != len(data) { - t.Errorf("got %d; want %d", n, len(data)) - return - } - } - batchWriter := func(toggle bool) { - defer wg.Done() - cm := ipv4.ControlMessage{ - Src: net.IPv4(127, 0, 0, 1), - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - ms := []ipv4.Message{ - { - Buffers: [][]byte{data}, - OOB: cm.Marshal(), - Addr: dst, - }, - } - n, err := p.WriteBatch(ms, 0) - if err != nil { - t.Error(err) - return - } - if n != len(ms) { - t.Errorf("got %d; want %d", n, len(ms)) - return - } - if ms[0].N != len(data) { - t.Errorf("got %d; want %d", ms[0].N, len(data)) - return - } - } - - const N = 10 - wg.Add(N) - for i := 0; i < N; i++ { - if batch { - go batchReader() - } else { - go reader() - } - } - wg.Add(2 * N) - for i := 0; i < 2*N; i++ { - if batch { - go batchWriter(i%2 != 0) - } else { - go writer(i%2 != 0) - } - - } - wg.Add(N) - for i := 0; i < N; i++ { - if batch { - go batchReader() - } else { - go reader() - } - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go deleted file mode 100644 index 3896a8ae..00000000 --- a/vendor/golang.org/x/net/ipv4/readwrite_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4_test - -import ( - "bytes" - "net" - "runtime" - "strings" - "sync" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -func BenchmarkReadWriteUnicast(b *testing.B) { - c, err := nettest.NewLocalPacketListener("udp4") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - - dst := c.LocalAddr() - wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) - - b.Run("NetUDP", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(wb, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(rb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("IPv4UDP", func(b *testing.B) { - p := ipv4.NewPacketConn(c) - cf := ipv4.FlagTTL | ipv4.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - cm := ipv4.ControlMessage{TTL: 1} - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - if ifi != nil { - cm.IfIndex = ifi.Index - } - - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(wb, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(rb); err != nil { - b.Fatal(err) - } - } - }) -} - -func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - - c, err := nettest.NewLocalPacketListener("udp4") - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - defer p.Close() - - dst := c.LocalAddr() - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface - wb := []byte("HELLO-R-U-THERE") - - if err := p.SetControlMessage(cf, true); err != nil { // probe before test - if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) - } - t.Fatal(err) - } - - var wg sync.WaitGroup - reader := func() { - defer wg.Done() - rb := make([]byte, 128) - if n, cm, _, err := p.ReadFrom(rb); err != nil { - t.Error(err) - return - } else if !bytes.Equal(rb[:n], wb) { - t.Errorf("got %v; want %v", rb[:n], wb) - return - } else { - s := cm.String() - if strings.Contains(s, ",") { - t.Errorf("should be space-separated values: %s", s) - } - } - } - writer := func(toggle bool) { - defer wg.Done() - cm := ipv4.ControlMessage{ - Src: net.IPv4(127, 0, 0, 1), - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - if n, err := p.WriteTo(wb, &cm, dst); err != nil { - t.Error(err) - return - } else if n != len(wb) { - t.Errorf("got %d; want %d", n, len(wb)) - return - } - } - - const N = 10 - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Add(2 * N) - for i := 0; i < 2*N; i++ { - go writer(i%2 != 0) - } - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go deleted file mode 100644 index 02c089f0..00000000 --- a/vendor/golang.org/x/net/ipv4/unicast_test.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv4_test - -import ( - "bytes" - "net" - "os" - "runtime" - "testing" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -func TestPacketConnReadWriteUnicastUDP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - c, err := nettest.NewLocalPacketListener("udp4") - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv4.NewPacketConn(c) - defer p.Close() - - dst := c.LocalAddr() - cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface - wb := []byte("HELLO-R-U-THERE") - - for i, toggle := range []bool{true, false, true} { - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - p.SetTTL(i + 1) - if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, err := p.WriteTo(wb, nil, dst); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatalf("got %v; want %v", n, len(wb)) - } - rb := make([]byte, 128) - if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, _, _, err := p.ReadFrom(rb); err != nil { - t.Fatal(err) - } else if !bytes.Equal(rb[:n], wb) { - t.Fatalf("got %v; want %v", rb[:n], wb) - } - } -} - -func TestPacketConnReadWriteUnicastICMP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") - if err != nil { - t.Fatal(err) - } - p := ipv4.NewPacketConn(c) - defer p.Close() - cf := ipv4.FlagDst | ipv4.FlagInterface - if runtime.GOOS != "solaris" { - // Solaris never allows to modify ICMP properties. 
- cf |= ipv4.FlagTTL - } - - for i, toggle := range []bool{true, false, true} { - wb, err := (&icmp.Message{ - Type: ipv4.ICMPTypeEcho, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: i + 1, - Data: []byte("HELLO-R-U-THERE"), - }, - }).Marshal(nil) - if err != nil { - t.Fatal(err) - } - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - p.SetTTL(i + 1) - if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, err := p.WriteTo(wb, nil, dst); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatalf("got %v; want %v", n, len(wb)) - } - rb := make([]byte, 128) - loop: - if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, _, _, err := p.ReadFrom(rb); err != nil { - switch runtime.GOOS { - case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } else { - m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) - if err != nil { - t.Fatal(err) - } - if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { - // On Linux we must handle own sent packets. - goto loop - } - if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { - t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) - } - } - } -} - -func TestRawConnReadWriteUnicastICMP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") - if err != nil { - t.Fatal(err) - } - r, err := ipv4.NewRawConn(c) - if err != nil { - t.Fatal(err) - } - defer r.Close() - cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface - - for i, toggle := range []bool{true, false, true} { - wb, err := (&icmp.Message{ - Type: ipv4.ICMPTypeEcho, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: i + 1, - Data: []byte("HELLO-R-U-THERE"), - }, - }).Marshal(nil) - if err != nil { - t.Fatal(err) - } - wh := &ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TOS: i + 1, - TotalLen: ipv4.HeaderLen + len(wb), - TTL: i + 1, - Protocol: 1, - Dst: dst.IP, - } - if err := r.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if err := r.WriteTo(wh, wb, nil); err != nil { - t.Fatal(err) - } - rb := make([]byte, ipv4.HeaderLen+128) - loop: - if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if _, b, _, err := r.ReadFrom(rb); err != nil { - switch runtime.GOOS { - case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } else { - m, err := icmp.ParseMessage(iana.ProtocolICMP, b) - if err != nil { - t.Fatal(err) - } 
- if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { - // On Linux we must handle own sent packets. - goto loop - } - if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { - t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) - } - } - } -} diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go deleted file mode 100644 index db5213b9..00000000 --- a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv4_test - -import ( - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" -) - -func TestConnUnicastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - ln, err := net.Listen("tcp4", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - defer ln.Close() - - errc := make(chan error, 1) - go func() { - c, err := ln.Accept() - if err != nil { - errc <- err - return - } - errc <- c.Close() - }() - - c, err := net.Dial("tcp4", ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - testUnicastSocketOptions(t, ipv4.NewConn(c)) - - if err := <-errc; err != nil { - t.Errorf("server: %v", err) - } -} - -var packetConnUnicastSocketOptionTests = []struct { - net, proto, addr string -}{ - {"udp4", "", "127.0.0.1:0"}, - {"ip4", ":icmp", "127.0.0.1"}, -} - -func TestPacketConnUnicastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - m, ok := nettest.SupportsRawIPSocket() - for _, tt := range packetConnUnicastSocketOptionTests { - if tt.net == "ip4" && !ok { - t.Log(m) - continue - } - c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - testUnicastSocketOptions(t, ipv4.NewPacketConn(c)) - } -} - -func TestRawConnUnicastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - r, err := ipv4.NewRawConn(c) - if err != nil { - t.Fatal(err) - } - - testUnicastSocketOptions(t, r) -} - -type testIPv4UnicastConn interface { - TOS() (int, error) - SetTOS(int) error - TTL() (int, error) - SetTTL(int) error -} - -func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) { - tos := iana.DiffServCS0 | iana.NotECNTransport - switch runtime.GOOS { - case "windows": - // IP_TOS option is supported on Windows 8 and beyond. 
- t.Skipf("not supported on %s", runtime.GOOS) - } - - if err := c.SetTOS(tos); err != nil { - t.Fatal(err) - } - if v, err := c.TOS(); err != nil { - t.Fatal(err) - } else if v != tos { - t.Fatalf("got %v; want %v", v, tos) - } - const ttl = 255 - if err := c.SetTTL(ttl); err != nil { - t.Fatal(err) - } - if v, err := c.TTL(); err != nil { - t.Fatal(err) - } else if v != ttl { - t.Fatalf("got %v; want %v", v, ttl) - } -} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go index 4f5fe683..10d64924 100644 --- a/vendor/golang.org/x/net/ipv6/batch.go +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -9,7 +9,6 @@ package ipv6 import ( "net" "runtime" - "syscall" "golang.org/x/net/internal/socket" ) @@ -67,7 +66,7 @@ type Message = socket.Message // On other platforms, this method will read only a single message. func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -98,7 +97,7 @@ func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will write only a single message. func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": diff --git a/vendor/golang.org/x/net/ipv6/bpf_test.go b/vendor/golang.org/x/net/ipv6/bpf_test.go deleted file mode 100644 index 8253e1f4..00000000 --- a/vendor/golang.org/x/net/ipv6/bpf_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6_test - -import ( - "net" - "runtime" - "testing" - "time" - - "golang.org/x/net/bpf" - "golang.org/x/net/ipv6" -) - -func TestBPF(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - l, err := net.ListenPacket("udp6", "[::1]:0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - - p := ipv6.NewPacketConn(l) - - // This filter accepts UDP packets whose first payload byte is - // even. - prog, err := bpf.Assemble([]bpf.Instruction{ - // Load the first byte of the payload (skipping UDP header). - bpf.LoadAbsolute{Off: 8, Size: 1}, - // Select LSB of the byte. - bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, - // Byte is even? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, - // Accept. - bpf.RetConstant{Val: 4096}, - // Ignore. 
- bpf.RetConstant{Val: 0}, - }) - if err != nil { - t.Fatalf("compiling BPF: %s", err) - } - - if err = p.SetBPF(prog); err != nil { - t.Fatalf("attaching filter to Conn: %s", err) - } - - s, err := net.Dial("udp6", l.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - defer s.Close() - go func() { - for i := byte(0); i < 10; i++ { - s.Write([]byte{i}) - } - }() - - l.SetDeadline(time.Now().Add(2 * time.Second)) - seen := make([]bool, 5) - for { - var b [512]byte - n, _, err := l.ReadFrom(b[:]) - if err != nil { - t.Fatalf("reading from listener: %s", err) - } - if n != 1 { - t.Fatalf("unexpected packet length, want 1, got %d", n) - } - if b[0] >= 10 { - t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) - } - if b[0]%2 != 0 { - t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) - } - seen[b[0]/2] = true - - seenAll := true - for _, v := range seen { - if !v { - seenAll = false - break - } - } - if seenAll { - break - } - } -} diff --git a/vendor/golang.org/x/net/ipv6/control_test.go b/vendor/golang.org/x/net/ipv6/control_test.go deleted file mode 100644 index c186ca99..00000000 --- a/vendor/golang.org/x/net/ipv6/control_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6_test - -import ( - "testing" - - "golang.org/x/net/ipv6" -) - -func TestControlMessageParseWithFuzz(t *testing.T) { - var cm ipv6.ControlMessage - for _, fuzz := range []string{ - "\f\x00\x00\x00)\x00\x00\x00.\x00\x00\x00", - "\f\x00\x00\x00)\x00\x00\x00,\x00\x00\x00", - } { - cm.Parse([]byte(fuzz)) - } -} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go deleted file mode 100644 index 55ddc116..00000000 --- a/vendor/golang.org/x/net/ipv6/defs_darwin.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#define __APPLE_USE_RFC_3542 -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go deleted file mode 100644 index a4c383a5..00000000 --- a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go deleted file mode 100644 index 53e62538..00000000 --- a/vendor/golang.org/x/net/ipv6/defs_freebsd.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_BINDANY = C.IPV6_BINDANY - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go deleted file mode 100644 index 3308cb2c..00000000 --- a/vendor/golang.org/x/net/ipv6/defs_linux.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIPV6_ADDRFORM = C.IPV6_ADDRFORM - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_FLOWINFO = C.IPV6_FLOWINFO - - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP - sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT - sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER - sysIPV6_MTU = C.IPV6_MTU - sysIPV6_RECVERR = C.IPV6_RECVERR - sysIPV6_V6ONLY = C.IPV6_V6ONLY - sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST - sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST - - //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT - //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT - //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO - //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE - //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE - //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT - - sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR - sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES - - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - - sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT - - sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR - sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR - sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT - sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF - - sysICMPV6_FILTER = C.ICMPV6_FILTER - - sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK - sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS - sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS - sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY - - sysSOL_SOCKET = 
C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter - - sizeofSockFprog = C.sizeof_struct_sock_fprog -) - -type kernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6FlowlabelReq C.struct_in6_flowlabel_req - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter - -type sockFProg C.struct_sock_fprog - -type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go deleted file mode 100644 index be9ceb9c..00000000 --- a/vendor/golang.org/x/net/ipv6/defs_netbsd.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go 
deleted file mode 100644 index 177ddf87..00000000 --- a/vendor/golang.org/x/net/ipv6/defs_openbsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL - sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL - sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL - sysIPSEC6_OUTSA = C.IPSEC6_OUTSA - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_PIPEX = C.IPV6_PIPEX - - sysIPV6_RTABLE = C.IPV6_RTABLE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go deleted file mode 100644 index 0f8ce2b4..00000000 --- a/vendor/golang.org/x/net/ipv6/defs_solaris.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - - sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_SEC_OPT = C.IPV6_SEC_OPT - sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - - sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK - sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT - sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK - sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT - sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK - sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT - - sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK - - sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC - - sysICMP6_FILTER = C.ICMP6_FILTER - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go index 703dafe8..eea4fde2 100644 --- a/vendor/golang.org/x/net/ipv6/dgramopt.go +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -6,7 
+6,6 @@ package ipv6 import ( "net" - "syscall" "golang.org/x/net/bpf" ) @@ -15,7 +14,7 @@ import ( // multicast packets. func (c *dgramOpt) MulticastHopLimit() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { @@ -28,7 +27,7 @@ func (c *dgramOpt) MulticastHopLimit() (int, error) { // outgoing multicast packets. func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { @@ -41,7 +40,7 @@ func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { // packet transmissions. func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -54,7 +53,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { // multicast packet transmissions. func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -67,7 +66,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { // should be copied and send back to the originator. func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { - return false, syscall.EINVAL + return false, errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -84,7 +83,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { // should be copied and send back to the originator. func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -104,7 +103,7 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { // configuration. func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinGroup] if !ok { @@ -122,7 +121,7 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { // source-specific group. func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveGroup] if !ok { @@ -143,7 +142,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { // routing configuration. func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { @@ -164,7 +163,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net // interface ifi. func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { @@ -186,7 +185,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne // ifi. func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { @@ -207,7 +206,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source // group by ExcludeSourceSpecificGroup again on the interface ifi. 
func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { @@ -230,7 +229,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source // field is located. func (c *dgramOpt) Checksum() (on bool, offset int, err error) { if !c.ok() { - return false, 0, syscall.EINVAL + return false, 0, errInvalidConn } so, ok := sockOpts[ssoChecksum] if !ok { @@ -251,7 +250,7 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { // checksum field is located. func (c *dgramOpt) SetChecksum(on bool, offset int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoChecksum] if !ok { @@ -266,7 +265,7 @@ func (c *dgramOpt) SetChecksum(on bool, offset int) error { // ICMPFilter returns an ICMP filter. func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -278,7 +277,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { // SetICMPFilter deploys the ICMP filter. func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -292,7 +291,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { // Only supported on Linux. func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoAttachFilter] if !ok { diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go index 664a97de..e0be9d50 100644 --- a/vendor/golang.org/x/net/ipv6/doc.go +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -56,7 +56,7 @@ // Multicasting // // The options for multicasting are available for net.UDPConn and -// net.IPconn which are created as network connections that use the +// net.IPConn which are created as network connections that use the // IPv6 transport. A few network facilities must be prepared before // you begin multicasting, at a minimum joining network interfaces and // multicast groups. @@ -240,4 +240,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv6 // import "golang.org/x/net/ipv6" -// BUG(mikio): This package is not implemented on NaCl and Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go index 0624c174..93257563 100644 --- a/vendor/golang.org/x/net/ipv6/endpoint.go +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -6,7 +6,6 @@ package ipv6 import ( "net" - "syscall" "time" "golang.org/x/net/internal/socket" @@ -34,7 +33,7 @@ func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } // with the endpoint. func (c *Conn) PathMTU() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoPathMTU] if !ok { @@ -76,7 +75,7 @@ func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // socket options. func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } @@ -85,7 +84,7 @@ func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { // endpoint. 
func (c *PacketConn) SetDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.SetDeadline(t) } @@ -94,7 +93,7 @@ func (c *PacketConn) SetDeadline(t time.Time) error { // endpoint. func (c *PacketConn) SetReadDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.SetReadDeadline(t) } @@ -103,7 +102,7 @@ func (c *PacketConn) SetReadDeadline(t time.Time) error { // endpoint. func (c *PacketConn) SetWriteDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.SetWriteDeadline(t) } @@ -111,7 +110,7 @@ func (c *PacketConn) SetWriteDeadline(t time.Time) error { // Close closes the endpoint. func (c *PacketConn) Close() error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.Close() } diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go deleted file mode 100644 index e761aa2a..00000000 --- a/vendor/golang.org/x/net/ipv6/example_test.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6_test - -import ( - "fmt" - "log" - "net" - "os" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/ipv6" -) - -func ExampleConn_markingTCP() { - ln, err := net.Listen("tcp", "[::]:1024") - if err != nil { - log.Fatal(err) - } - defer ln.Close() - - for { - c, err := ln.Accept() - if err != nil { - log.Fatal(err) - } - go func(c net.Conn) { - defer c.Close() - if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil { - p := ipv6.NewConn(c) - if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 - log.Fatal(err) - } - if err := p.SetHopLimit(128); err != nil { - log.Fatal(err) - } - } - if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { - log.Fatal(err) - } - }(c) - } -} - -func ExamplePacketConn_servingOneShotMulticastDNS() { - c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP - if err != nil { - log.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - - en0, err := net.InterfaceByName("en0") - if err != nil { - log.Fatal(err) - } - mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")} - if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { - log.Fatal(err) - } - defer p.LeaveGroup(en0, &mDNSLinkLocal) - if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { - log.Fatal(err) - } - - var wcm ipv6.ControlMessage - b := make([]byte, 1500) - for { - _, rcm, peer, err := p.ReadFrom(b) - if err != nil { - log.Fatal(err) - } - if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) { - continue - } - wcm.IfIndex = rcm.IfIndex - answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this - if _, err := p.WriteTo(answers, &wcm, peer); err != nil { - log.Fatal(err) - } - } -} - -func ExamplePacketConn_tracingIPPacketRoute() { - // Tracing an IP packet route to www.google.com. 
- - const host = "www.google.com" - ips, err := net.LookupIP(host) - if err != nil { - log.Fatal(err) - } - var dst net.IPAddr - for _, ip := range ips { - if ip.To16() != nil && ip.To4() == nil { - dst.IP = ip - fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) - break - } - } - if dst.IP == nil { - log.Fatal("no AAAA record found") - } - - c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 - if err != nil { - log.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - - if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { - log.Fatal(err) - } - wm := icmp.Message{ - Type: ipv6.ICMPTypeEchoRequest, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, - Data: []byte("HELLO-R-U-THERE"), - }, - } - var f ipv6.ICMPFilter - f.SetAll(true) - f.Accept(ipv6.ICMPTypeTimeExceeded) - f.Accept(ipv6.ICMPTypeEchoReply) - if err := p.SetICMPFilter(&f); err != nil { - log.Fatal(err) - } - - var wcm ipv6.ControlMessage - rb := make([]byte, 1500) - for i := 1; i <= 64; i++ { // up to 64 hops - wm.Body.(*icmp.Echo).Seq = i - wb, err := wm.Marshal(nil) - if err != nil { - log.Fatal(err) - } - - // In the real world usually there are several - // multiple traffic-engineered paths for each hop. - // You may need to probe a few times to each hop. - begin := time.Now() - wcm.HopLimit = i - if _, err := p.WriteTo(wb, &wcm, &dst); err != nil { - log.Fatal(err) - } - if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { - log.Fatal(err) - } - n, rcm, peer, err := p.ReadFrom(rb) - if err != nil { - if err, ok := err.(net.Error); ok && err.Timeout() { - fmt.Printf("%v\t*\n", i) - continue - } - log.Fatal(err) - } - rm, err := icmp.ParseMessage(58, rb[:n]) - if err != nil { - log.Fatal(err) - } - rtt := time.Since(begin) - - // In the real world you need to determine whether the - // received message is yours using ControlMessage.Src, - // ControlMesage.Dst, icmp.Echo.ID and icmp.Echo.Seq. - switch rm.Type { - case ipv6.ICMPTypeTimeExceeded: - names, _ := net.LookupAddr(peer.String()) - fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) - case ipv6.ICMPTypeEchoReply: - names, _ := net.LookupAddr(peer.String()) - fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) - return - } - } -} - -func ExamplePacketConn_advertisingOSPFHello() { - c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 - if err != nil { - log.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - - en0, err := net.InterfaceByName("en0") - if err != nil { - log.Fatal(err) - } - allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")} - if err := p.JoinGroup(en0, &allSPFRouters); err != nil { - log.Fatal(err) - } - defer p.LeaveGroup(en0, &allSPFRouters) - - hello := make([]byte, 24) // fake hello data, you need to implement this - ospf := make([]byte, 16) // fake ospf header, you need to implement this - ospf[0] = 3 // version 3 - ospf[1] = 1 // hello packet - ospf = append(ospf, hello...) 
- if err := p.SetChecksum(true, 12); err != nil { - log.Fatal(err) - } - - cm := ipv6.ControlMessage{ - TrafficClass: 0xc0, // DSCP CS6 - HopLimit: 1, - IfIndex: en0.Index, - } - if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil { - log.Fatal(err) - } -} diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go deleted file mode 100644 index 41886ec7..00000000 --- a/vendor/golang.org/x/net/ipv6/gen.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", - parseICMPv6Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") - fmt.Fprintf(&bb, "package ipv6\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv6Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var icp icmpv6Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigName) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) - } - fmt.Fprintf(w, "}\n") - return nil -} - 
-type icmpv6Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv6ParamRecord struct { - OrigName string - Name string - Value int -} - -func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { - id := -1 - for i, r := range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Name, "Reserved") || - strings.Contains(pr.Name, "Unassigned") || - strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "Experiment") || - strings.Contains(pr.Name, "experiment") { - continue - } - ss := strings.Split(pr.Name, "\n") - if len(ss) > 1 { - prs[i].Name = strings.Join(ss, " ") - } else { - prs[i].Name = ss[0] - } - s := strings.TrimSpace(prs[i].Name) - prs[i].OrigName = s - prs[i].Name = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go index e9dbc2e1..1a18f754 100644 --- a/vendor/golang.org/x/net/ipv6/genericopt.go +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -4,13 +4,11 @@ package ipv6 -import "syscall" - // TrafficClass returns the traffic class field value for outgoing // packets. func (c *genericOpt) TrafficClass() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoTrafficClass] if !ok { @@ -23,7 +21,7 @@ func (c *genericOpt) TrafficClass() (int, error) { // outgoing packets. func (c *genericOpt) SetTrafficClass(tclass int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoTrafficClass] if !ok { @@ -35,7 +33,7 @@ func (c *genericOpt) SetTrafficClass(tclass int) error { // HopLimit returns the hop limit field value for outgoing packets. func (c *genericOpt) HopLimit() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoHopLimit] if !ok { @@ -48,7 +46,7 @@ func (c *genericOpt) HopLimit() (int, error) { // packets. func (c *genericOpt) SetHopLimit(hoplim int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoHopLimit] if !ok { diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go deleted file mode 100644 index ca11dc23..00000000 --- a/vendor/golang.org/x/net/ipv6/header_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6_test - -import ( - "net" - "reflect" - "strings" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/ipv6" -) - -var ( - wireHeaderFromKernel = [ipv6.HeaderLen]byte{ - 0x69, 0x8b, 0xee, 0xf1, - 0xca, 0xfe, 0x2c, 0x01, - 0x20, 0x01, 0x0d, 0xb8, - 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x20, 0x01, 0x0d, 0xb8, - 0x00, 0x02, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - } - - testHeader = &ipv6.Header{ - Version: ipv6.Version, - TrafficClass: iana.DiffServAF43, - FlowLabel: 0xbeef1, - PayloadLen: 0xcafe, - NextHeader: iana.ProtocolIPv6Frag, - HopLimit: 1, - Src: net.ParseIP("2001:db8:1::1"), - Dst: net.ParseIP("2001:db8:2::1"), - } -) - -func TestParseHeader(t *testing.T) { - h, err := ipv6.ParseHeader(wireHeaderFromKernel[:]) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(h, testHeader) { - t.Fatalf("got %#v; want %#v", h, testHeader) - } - s := h.String() - if strings.Contains(s, ",") { - t.Fatalf("should be space-separated values: %s", s) - } -} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go index 25974013..7ac53522 100644 --- a/vendor/golang.org/x/net/ipv6/helper.go +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -10,6 +10,7 @@ import ( ) var ( + errInvalidConn = errors.New("invalid connection") errMissingAddress = errors.New("missing address") errHeaderTooShort = errors.New("header too short") errInvalidConnType = errors.New("invalid conn type") diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go index 3c6214fb..32db1aa9 100644 --- a/vendor/golang.org/x/net/ipv6/iana.go +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -1,9 +1,9 @@ // go generate gen.go -// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. package ipv6 -// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 const ( ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big @@ -40,9 +40,11 @@ const ( ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation ICMPTypeMPLControl ICMPType = 159 // MPL Control Message + ICMPTypeExtendedEchoRequest ICMPType = 160 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 161 // Extended Echo Reply ) -// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 var icmpTypes = map[ICMPType]string{ 1: "destination unreachable", 2: "packet too big", @@ -79,4 +81,6 @@ var icmpTypes = map[ICMPType]string{ 157: "duplicate address request", 158: "duplicate address confirmation", 159: "mpl control message", + 160: "extended echo request", + 161: "extended echo reply", } diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go deleted file mode 100644 index d8e9675d..00000000 --- a/vendor/golang.org/x/net/ipv6/icmp_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
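The helper.go hunk above adds errInvalidConn, and the non-vendor-deletion hunks in this part of the diff swap it in for syscall.EINVAL, so a nil or closed connection returns a portable sentinel error instead of a platform errno. A minimal sketch of that pattern, with an illustrative conn type that is not the package's own:

package sketch

import "errors"

// Portable sentinel, mirroring the errInvalidConn added in helper.go.
var errInvalidConn = errors.New("invalid connection")

type conn struct{ inner interface{} }

func (c *conn) ok() bool { return c != nil && c.inner != nil }

// HopLimit illustrates the guard used throughout the diff: fail fast with
// the sentinel rather than syscall.EINVAL.
func (c *conn) HopLimit() (int, error) {
	if !c.ok() {
		return 0, errInvalidConn
	}
	return 64, nil // placeholder result for the sketch
}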
- -package ipv6_test - -import ( - "net" - "reflect" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -var icmpStringTests = []struct { - in ipv6.ICMPType - out string -}{ - {ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"}, - - {256, ""}, -} - -func TestICMPString(t *testing.T) { - for _, tt := range icmpStringTests { - s := tt.in.String() - if s != tt.out { - t.Errorf("got %s; want %s", s, tt.out) - } - } -} - -func TestICMPFilter(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - - var f ipv6.ICMPFilter - for _, toggle := range []bool{false, true} { - f.SetAll(toggle) - for _, typ := range []ipv6.ICMPType{ - ipv6.ICMPTypeDestinationUnreachable, - ipv6.ICMPTypeEchoReply, - ipv6.ICMPTypeNeighborSolicitation, - ipv6.ICMPTypeDuplicateAddressConfirmation, - } { - f.Accept(typ) - if f.WillBlock(typ) { - t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ) - } - f.Block(typ) - if !f.WillBlock(typ) { - t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ) - } - } - } -} - -func TestSetICMPFilter(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - p := ipv6.NewPacketConn(c) - - var f ipv6.ICMPFilter - f.SetAll(true) - f.Accept(ipv6.ICMPTypeEchoRequest) - f.Accept(ipv6.ICMPTypeEchoReply) - if err := p.SetICMPFilter(&f); err != nil { - t.Fatal(err) - } - kf, err := p.ICMPFilter() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(kf, &f) { - t.Fatalf("got %#v; want %#v", kf, f) - } -} diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go deleted file mode 100644 index 6efe56c6..00000000 --- a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6_test - -import ( - "net" - "testing" -) - -func connector(t *testing.T, network, addr string, done chan<- bool) { - defer func() { done <- true }() - - c, err := net.Dial(network, addr) - if err != nil { - t.Error(err) - return - } - c.Close() -} - -func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { - defer func() { done <- true }() - - c, err := ln.Accept() - if err != nil { - t.Error(err) - return - } - c.Close() -} diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go deleted file mode 100644 index 69a21cd3..00000000 --- a/vendor/golang.org/x/net/ipv6/multicast_test.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
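The icmp_test.go removed above covered ipv6.ICMPFilter. A short, hedged sketch of installing such a filter on a raw ICMPv6 socket (requires raw-socket privileges and is unsupported on Plan 9 and Windows):

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("ip6:ipv6-icmp", "::1")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	var f ipv6.ICMPFilter
	f.SetAll(true)                   // block every ICMPv6 type by default
	f.Accept(ipv6.ICMPTypeEchoReply) // then pass echo replies through
	if err := ipv6.NewPacketConn(c).SetICMPFilter(&f); err != nil {
		log.Fatal(err)
	}
}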
- -package ipv6_test - -import ( - "bytes" - "net" - "os" - "runtime" - "testing" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -var packetConnReadWriteMulticastUDPTests = []struct { - addr string - grp, src *net.UDPAddr -}{ - {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 - - {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 -} - -func TestPacketConnReadWriteMulticastUDP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { - t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) - } - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - for _, tt := range packetConnReadWriteMulticastUDPTests { - c, err := net.ListenPacket("udp6", tt.addr) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - grp := *tt.grp - grp.Port = c.LocalAddr().(*net.UDPAddr).Port - p := ipv6.NewPacketConn(c) - defer p.Close() - if tt.src == nil { - if err := p.JoinGroup(ifi, &grp); err != nil { - t.Fatal(err) - } - defer p.LeaveGroup(ifi, &grp) - } else { - if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { - switch runtime.GOOS { - case "freebsd", "linux": - default: // platforms that don't support MLDv2 fail here - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) - } - if err := p.SetMulticastInterface(ifi); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastInterface(); err != nil { - t.Fatal(err) - } - if err := p.SetMulticastLoopback(true); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastLoopback(); err != nil { - t.Fatal(err) - } - - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - Src: net.IPv6loopback, - IfIndex: ifi.Index, - } - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - wb := []byte("HELLO-R-U-THERE") - - for i, toggle := range []bool{true, false, true} { - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { - t.Fatal(err) - } - cm.HopLimit = i + 1 - if n, err := p.WriteTo(wb, &cm, &grp); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatal(err) - } - rb := make([]byte, 128) - if n, _, _, err := p.ReadFrom(rb); err != nil { - t.Fatal(err) - } else if !bytes.Equal(rb[:n], wb) { - t.Fatalf("got %v; want %v", rb[:n], wb) - } - } - } -} - -var packetConnReadWriteMulticastICMPTests = []struct { - grp, src *net.IPAddr -}{ - {&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 - - {&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 -} - -func TestPacketConnReadWriteMulticastICMP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - if 
!nettest.SupportsIPv6MulticastDeliveryOnLoopback() { - t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - for _, tt := range packetConnReadWriteMulticastICMPTests { - c, err := net.ListenPacket("ip6:ipv6-icmp", "::") - if err != nil { - t.Fatal(err) - } - defer c.Close() - - pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP) - p := ipv6.NewPacketConn(c) - defer p.Close() - if tt.src == nil { - if err := p.JoinGroup(ifi, tt.grp); err != nil { - t.Fatal(err) - } - defer p.LeaveGroup(ifi, tt.grp) - } else { - if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { - switch runtime.GOOS { - case "freebsd", "linux": - default: // platforms that don't support MLDv2 fail here - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) - } - if err := p.SetMulticastInterface(ifi); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastInterface(); err != nil { - t.Fatal(err) - } - if err := p.SetMulticastLoopback(true); err != nil { - t.Fatal(err) - } - if _, err := p.MulticastLoopback(); err != nil { - t.Fatal(err) - } - - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - Src: net.IPv6loopback, - IfIndex: ifi.Index, - } - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - - var f ipv6.ICMPFilter - f.SetAll(true) - f.Accept(ipv6.ICMPTypeEchoReply) - if err := p.SetICMPFilter(&f); err != nil { - t.Fatal(err) - } - - var psh []byte - for i, toggle := range []bool{true, false, true} { - if toggle { - psh = nil - if err := p.SetChecksum(true, 2); err != nil { - // Solaris never allows to - // modify ICMP properties. - if runtime.GOOS != "solaris" { - t.Fatal(err) - } - } - } else { - psh = pshicmp - // Some platforms never allow to - // disable the kernel checksum - // processing. 
- p.SetChecksum(false, -1) - } - wb, err := (&icmp.Message{ - Type: ipv6.ICMPTypeEchoRequest, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: i + 1, - Data: []byte("HELLO-R-U-THERE"), - }, - }).Marshal(psh) - if err != nil { - t.Fatal(err) - } - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { - t.Fatal(err) - } - cm.HopLimit = i + 1 - if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatalf("got %v; want %v", n, len(wb)) - } - rb := make([]byte, 128) - if n, _, _, err := p.ReadFrom(rb); err != nil { - switch runtime.GOOS { - case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } else { - if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { - t.Fatal(err) - } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { - t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) - } - } - } - } -} diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go deleted file mode 100644 index b27713e2..00000000 --- a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6_test - -import ( - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -var udpMultipleGroupListenerTests = []net.Addr{ - &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727 - &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}, - &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}, -} - -func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - for _, gaddr := range udpMultipleGroupListenerTests { - c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port - if err != nil { - t.Fatal(err) - } - defer c.Close() - - p := ipv6.NewPacketConn(c) - var mift []*net.Interface - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { - continue - } - if err := p.JoinGroup(&ifi, gaddr); err != nil { - t.Fatal(err) - } - mift = append(mift, &ift[i]) - } - for _, ifi := range mift { - if err := p.LeaveGroup(ifi, gaddr); err != nil { - t.Fatal(err) - } - } - } -} - -func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - for _, gaddr := range udpMultipleGroupListenerTests { - c1, err := net.ListenPacket("udp6", "[ff02::]:0") // wildcard address with reusable port - if err != nil { - t.Fatal(err) - } - defer c1.Close() - _, port, err := net.SplitHostPort(c1.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - c2, err := net.ListenPacket("udp6", net.JoinHostPort("ff02::", 
port)) // wildcard address with reusable port - if err != nil { - t.Fatal(err) - } - defer c2.Close() - - var ps [2]*ipv6.PacketConn - ps[0] = ipv6.NewPacketConn(c1) - ps[1] = ipv6.NewPacketConn(c2) - var mift []*net.Interface - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { - continue - } - for _, p := range ps { - if err := p.JoinGroup(&ifi, gaddr); err != nil { - t.Fatal(err) - } - } - mift = append(mift, &ift[i]) - } - for _, ifi := range mift { - for _, p := range ps { - if err := p.LeaveGroup(ifi, gaddr); err != nil { - t.Fatal(err) - } - } - } - } -} - -func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 - type ml struct { - c *ipv6.PacketConn - ifi *net.Interface - } - var mlt []*ml - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - port := "0" - for i, ifi := range ift { - ip, ok := nettest.IsMulticastCapable("ip6", &ifi) - if !ok { - continue - } - c, err := net.ListenPacket("udp6", net.JoinHostPort(ip.String()+"%"+ifi.Name, port)) // unicast address with non-reusable port - if err != nil { - // The listen may fail when the serivce is - // already in use, but it's fine because the - // purpose of this is not to test the - // bookkeeping of IP control block inside the - // kernel. - t.Log(err) - continue - } - defer c.Close() - if port == "0" { - _, port, err = net.SplitHostPort(c.LocalAddr().String()) - if err != nil { - t.Fatal(err) - } - } - p := ipv6.NewPacketConn(c) - if err := p.JoinGroup(&ifi, &gaddr); err != nil { - t.Fatal(err) - } - mlt = append(mlt, &ml{p, &ift[i]}) - } - for _, m := range mlt { - if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { - t.Fatal(err) - } - } -} - -func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address - if err != nil { - t.Fatal(err) - } - defer c.Close() - - p := ipv6.NewPacketConn(c) - gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 - var mift []*net.Interface - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { - continue - } - if err := p.JoinGroup(&ifi, &gaddr); err != nil { - t.Fatal(err) - } - mift = append(mift, &ift[i]) - } - for _, ifi := range mift { - if err := p.LeaveGroup(ifi, &gaddr); err != nil { - t.Fatal(err) - } - } -} - -func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { - switch runtime.GOOS { - case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address - t.Skipf("not supported on %s", runtime.GOOS) - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 - type ml struct { - c 
*ipv6.PacketConn - ifi *net.Interface - } - var mlt []*ml - - ift, err := net.Interfaces() - if err != nil { - t.Fatal(err) - } - for i, ifi := range ift { - ip, ok := nettest.IsMulticastCapable("ip6", &ifi) - if !ok { - continue - } - c, err := net.ListenPacket("ip6:ipv6-icmp", ip.String()+"%"+ifi.Name) // unicast address - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - if err := p.JoinGroup(&ifi, &gaddr); err != nil { - t.Fatal(err) - } - mlt = append(mlt, &ml{p, &ift[i]}) - } - for _, m := range mlt { - if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go deleted file mode 100644 index 9e6b902d..00000000 --- a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6_test - -import ( - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -var packetConnMulticastSocketOptionTests = []struct { - net, proto, addr string - grp, src net.Addr -}{ - {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 - {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727 - - {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 - {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 -} - -func TestPacketConnMulticastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) - if ifi == nil { - t.Skipf("not available on %s", runtime.GOOS) - } - - m, ok := nettest.SupportsRawIPSocket() - for _, tt := range packetConnMulticastSocketOptionTests { - if tt.net == "ip6" && !ok { - t.Log(m) - continue - } - c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - defer p.Close() - - if tt.src == nil { - testMulticastSocketOptions(t, p, ifi, tt.grp) - } else { - testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) - } - } -} - -type testIPv6MulticastConn interface { - MulticastHopLimit() (int, error) - SetMulticastHopLimit(ttl int) error - MulticastLoopback() (bool, error) - SetMulticastLoopback(bool) error - JoinGroup(*net.Interface, net.Addr) error - LeaveGroup(*net.Interface, net.Addr) error - JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error - LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error - ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error - IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error -} - -func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) { - const hoplim = 255 - if err := c.SetMulticastHopLimit(hoplim); err != nil { - t.Error(err) - return - } - if v, err := c.MulticastHopLimit(); err != nil { - t.Error(err) - return - } else if v != hoplim { - t.Errorf("got %v; want %v", v, hoplim) - return - } - - 
for _, toggle := range []bool{true, false} { - if err := c.SetMulticastLoopback(toggle); err != nil { - t.Error(err) - return - } - if v, err := c.MulticastLoopback(); err != nil { - t.Error(err) - return - } else if v != toggle { - t.Errorf("got %v; want %v", v, toggle) - return - } - } - - if err := c.JoinGroup(ifi, grp); err != nil { - t.Error(err) - return - } - if err := c.LeaveGroup(ifi, grp); err != nil { - t.Error(err) - return - } -} - -func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) { - // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP - if err := c.JoinGroup(ifi, grp); err != nil { - t.Error(err) - return - } - if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { - switch runtime.GOOS { - case "freebsd", "linux": - default: // platforms that don't support MLDv2 fail here - t.Logf("not supported on %s", runtime.GOOS) - return - } - t.Error(err) - return - } - if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - if err := c.LeaveGroup(ifi, grp); err != nil { - t.Error(err) - return - } - - // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP - if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - - // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP - if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { - t.Error(err) - return - } - if err := c.LeaveGroup(ifi, grp); err != nil { - t.Error(err) - return - } -} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go index 4ee4b062..3f23b5d2 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv6 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv6 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } return c.readFrom(b) } @@ -29,7 +26,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // cm may be nil if control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } return c.writeTo(b, cm, dst) } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go index fdc6c399..bc4209db 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
// +build !go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go index 8f6d02e2..7dd65048 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index 99a43542..459142d2 100644 --- a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 windows +// +build js nacl plan9 windows package ipv6 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv6 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } if n, src, err = c.PacketConn.ReadFrom(b); err != nil { return 0, nil, nil, err @@ -32,7 +29,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // cm may be nil if control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } if dst == nil { return 0, errMissingAddress diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go deleted file mode 100644 index c11d92ae..00000000 --- a/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package ipv6_test - -import ( - "bytes" - "fmt" - "net" - "runtime" - "strings" - "sync" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - b.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph := []byte{ - 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - } - greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) 
- bb := make([]byte, 128) - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - HopLimit: 1, - Src: net.IPv6loopback, - } - if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { - cm.IfIndex = ifi.Index - } - - b.Run("UDP", func(b *testing.B) { - c, err := nettest.NewLocalPacketListener("udp6") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv6.FlagHopLimit | ipv6.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(payload, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(payload, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - }) - b.Run("IP", func(b *testing.B) { - switch runtime.GOOS { - case "netbsd": - b.Skip("need to configure gre on netbsd") - case "openbsd": - b.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(datagram, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(datagram, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - }) -} - -func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph := []byte{ - 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - } - greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) 
- - t.Run("UDP", func(t *testing.T) { - c, err := nettest.NewLocalPacketListener("udp6") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) - }) - }) - t.Run("IP", func(t *testing.T) { - switch runtime.GOOS { - case "netbsd": - t.Skip("need to configure gre on netbsd") - case "openbsd": - t.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) - }) - }) -} - -func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr) { - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - - if err := p.SetControlMessage(cf, true); err != nil { // probe before test - if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) - } - t.Fatal(err) - } - - var wg sync.WaitGroup - reader := func() { - defer wg.Done() - b := make([]byte, 128) - n, cm, _, err := p.ReadFrom(b) - if err != nil { - t.Error(err) - return - } - if !bytes.Equal(b[:n], data) { - t.Errorf("got %#v; want %#v", b[:n], data) - return - } - s := cm.String() - if strings.Contains(s, ",") { - t.Errorf("should be space-separated values: %s", s) - return - } - } - writer := func(toggle bool) { - defer wg.Done() - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - HopLimit: 1, - Src: net.IPv6loopback, - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - n, err := p.WriteTo(data, &cm, dst) - if err != nil { - t.Error(err) - return - } - if n != len(data) { - t.Errorf("got %d; want %d", n, len(data)) - return - } - } - - const N = 10 - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Add(2 * N) - for i := 0; i < 2*N; i++ { - go writer(i%2 != 0) - - } - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go deleted file mode 100644 index e2fd7337..00000000 --- a/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 - -package ipv6_test - -import ( - "bytes" - "fmt" - "net" - "runtime" - "strings" - "sync" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - b.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph := []byte{ - 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - } - greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) - bb := make([]byte, 128) - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - HopLimit: 1, - Src: net.IPv6loopback, - } - if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { - cm.IfIndex = ifi.Index - } - - b.Run("UDP", func(b *testing.B) { - c, err := nettest.NewLocalPacketListener("udp6") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv6.FlagHopLimit | ipv6.FlagInterface - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - wms := []ipv6.Message{ - { - Buffers: [][]byte{payload}, - Addr: dst, - OOB: cm.Marshal(), - }, - } - rms := []ipv6.Message{ - { - Buffers: [][]byte{bb}, - OOB: ipv6.NewControlMessage(cf), - }, - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(payload, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(payload, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("Batch", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteBatch(wms, 0); err != nil { - b.Fatal(err) - } - if _, err := p.ReadBatch(rms, 0); err != nil { - b.Fatal(err) - } - } - }) - }) - b.Run("IP", func(b *testing.B) { - switch runtime.GOOS { - case "netbsd": - b.Skip("need to configure gre on netbsd") - case "openbsd": - b.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - dst := c.LocalAddr() - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - wms := []ipv6.Message{ - { - Buffers: [][]byte{datagram}, - Addr: dst, - OOB: cm.Marshal(), - }, - } - rms := []ipv6.Message{ - { - Buffers: [][]byte{bb}, - OOB: ipv6.NewControlMessage(cf), - }, - } - b.Run("Net", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(datagram, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("ToFrom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(datagram, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err 
:= p.ReadFrom(bb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("Batch", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := p.WriteBatch(wms, 0); err != nil { - b.Fatal(err) - } - if _, err := p.ReadBatch(rms, 0); err != nil { - b.Fatal(err) - } - } - }) - }) -} - -func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - - payload := []byte("HELLO-R-U-THERE") - iph := []byte{ - 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - } - greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} - datagram := append(greh, append(iph, payload...)...) - - t.Run("UDP", func(t *testing.T) { - c, err := nettest.NewLocalPacketListener("udp6") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) - }) - t.Run("Batch", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) - }) - }) - t.Run("IP", func(t *testing.T) { - switch runtime.GOOS { - case "netbsd": - t.Skip("need to configure gre on netbsd") - case "openbsd": - t.Skip("net.inet.gre.allow=0 by default on openbsd") - } - - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") - if err != nil { - t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - t.Run("ToFrom", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) - }) - t.Run("Batch", func(t *testing.T) { - testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) - }) - }) -} - -func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr, batch bool) { - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - - if err := p.SetControlMessage(cf, true); err != nil { // probe before test - if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) - } - t.Fatal(err) - } - - var wg sync.WaitGroup - reader := func() { - defer wg.Done() - b := make([]byte, 128) - n, cm, _, err := p.ReadFrom(b) - if err != nil { - t.Error(err) - return - } - if !bytes.Equal(b[:n], data) { - t.Errorf("got %#v; want %#v", b[:n], data) - return - } - s := cm.String() - if strings.Contains(s, ",") { - t.Errorf("should be space-separated values: %s", s) - return - } - } - batchReader := func() { - defer wg.Done() - ms := []ipv6.Message{ - { - Buffers: [][]byte{make([]byte, 128)}, - OOB: ipv6.NewControlMessage(cf), - }, - } - n, err := p.ReadBatch(ms, 0) - if err != nil { - t.Error(err) - return - } - if n != len(ms) { - t.Errorf("got %d; want %d", n, len(ms)) - return - } - var cm ipv6.ControlMessage - if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { - t.Error(err) - return - } - b := ms[0].Buffers[0][:ms[0].N] - if !bytes.Equal(b, data) { - t.Errorf("got %#v; want %#v", b, data) - return - } - s := cm.String() - if strings.Contains(s, ",") { - t.Errorf("should be 
space-separated values: %s", s) - return - } - } - writer := func(toggle bool) { - defer wg.Done() - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - HopLimit: 1, - Src: net.IPv6loopback, - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - n, err := p.WriteTo(data, &cm, dst) - if err != nil { - t.Error(err) - return - } - if n != len(data) { - t.Errorf("got %d; want %d", n, len(data)) - return - } - } - batchWriter := func(toggle bool) { - defer wg.Done() - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - HopLimit: 1, - Src: net.IPv6loopback, - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - ms := []ipv6.Message{ - { - Buffers: [][]byte{data}, - OOB: cm.Marshal(), - Addr: dst, - }, - } - n, err := p.WriteBatch(ms, 0) - if err != nil { - t.Error(err) - return - } - if n != len(ms) { - t.Errorf("got %d; want %d", n, len(ms)) - return - } - if ms[0].N != len(data) { - t.Errorf("got %d; want %d", ms[0].N, len(data)) - return - } - } - - const N = 10 - wg.Add(N) - for i := 0; i < N; i++ { - if batch { - go batchReader() - } else { - go reader() - } - } - wg.Add(2 * N) - for i := 0; i < 2*N; i++ { - if batch { - go batchWriter(i%2 != 0) - } else { - go writer(i%2 != 0) - } - } - wg.Add(N) - for i := 0; i < N; i++ { - if batch { - go batchReader() - } else { - go reader() - } - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go deleted file mode 100644 index 206b915c..00000000 --- a/vendor/golang.org/x/net/ipv6/readwrite_test.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6_test - -import ( - "bytes" - "net" - "runtime" - "strings" - "sync" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -func BenchmarkReadWriteUnicast(b *testing.B) { - c, err := nettest.NewLocalPacketListener("udp6") - if err != nil { - b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) - } - defer c.Close() - - dst := c.LocalAddr() - wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) - - b.Run("NetUDP", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if _, err := c.WriteTo(wb, dst); err != nil { - b.Fatal(err) - } - if _, _, err := c.ReadFrom(rb); err != nil { - b.Fatal(err) - } - } - }) - b.Run("IPv6UDP", func(b *testing.B) { - p := ipv6.NewPacketConn(c) - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - if err := p.SetControlMessage(cf, true); err != nil { - b.Fatal(err) - } - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - HopLimit: 1, - } - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) - if ifi != nil { - cm.IfIndex = ifi.Index - } - - for i := 0; i < b.N; i++ { - if _, err := p.WriteTo(wb, &cm, dst); err != nil { - b.Fatal(err) - } - if _, _, _, err := p.ReadFrom(rb); err != nil { - b.Fatal(err) - } - } - }) -} - -func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - c, err := nettest.NewLocalPacketListener("udp6") - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - defer p.Close() - - dst := c.LocalAddr() - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - wb := []byte("HELLO-R-U-THERE") - - if err := p.SetControlMessage(cf, true); err != nil { // probe before test - if nettest.ProtocolNotSupported(err) { - t.Skipf("not supported on %s", runtime.GOOS) - } - t.Fatal(err) - } - - var wg sync.WaitGroup - reader := func() { - defer wg.Done() - rb := make([]byte, 128) - if n, cm, _, err := p.ReadFrom(rb); err != nil { - t.Error(err) - return - } else if !bytes.Equal(rb[:n], wb) { - t.Errorf("got %v; want %v", rb[:n], wb) - return - } else { - s := cm.String() - if strings.Contains(s, ",") { - t.Errorf("should be space-separated values: %s", s) - } - } - } - writer := func(toggle bool) { - defer wg.Done() - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - Src: net.IPv6loopback, - } - if ifi != nil { - cm.IfIndex = ifi.Index - } - if err := p.SetControlMessage(cf, toggle); err != nil { - t.Error(err) - return - } - if n, err := p.WriteTo(wb, &cm, dst); err != nil { - t.Error(err) - return - } else if n != len(wb) { - t.Errorf("got %d; want %d", n, len(wb)) - return - } - } - - const N = 10 - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Add(2 * N) - for i := 0; i < 2*N; i++ { - go writer(i%2 != 0) - } - wg.Add(N) - for i := 0; i < N; i++ { - go reader() - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go deleted file mode 100644 index 774338db..00000000 --- a/vendor/golang.org/x/net/ipv6/sockopt_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2013 The Go 
Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ipv6_test - -import ( - "fmt" - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -var supportsIPv6 bool = nettest.SupportsIPv6() - -func TestConnInitiatorPathMTU(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - ln, err := net.Listen("tcp6", "[::1]:0") - if err != nil { - t.Fatal(err) - } - defer ln.Close() - - done := make(chan bool) - go acceptor(t, ln, done) - - c, err := net.Dial("tcp6", ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { - switch runtime.GOOS { - case "darwin": // older darwin kernels don't support IPV6_PATHMTU option - t.Logf("not supported on %s", runtime.GOOS) - default: - t.Fatal(err) - } - } else { - t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) - } - - <-done -} - -func TestConnResponderPathMTU(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - ln, err := net.Listen("tcp6", "[::1]:0") - if err != nil { - t.Fatal(err) - } - defer ln.Close() - - done := make(chan bool) - go connector(t, "tcp6", ln.Addr().String(), done) - - c, err := ln.Accept() - if err != nil { - t.Fatal(err) - } - defer c.Close() - - if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { - switch runtime.GOOS { - case "darwin": // older darwin kernels don't support IPV6_PATHMTU option - t.Logf("not supported on %s", runtime.GOOS) - default: - t.Fatal(err) - } - } else { - t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) - } - - <-done -} - -func TestPacketConnChecksum(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 - if err != nil { - t.Fatal(err) - } - defer c.Close() - - p := ipv6.NewPacketConn(c) - offset := 12 // see RFC 5340 - - for _, toggle := range []bool{false, true} { - if err := p.SetChecksum(toggle, offset); err != nil { - if toggle { - t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) - } else { - // Some platforms never allow to disable the kernel - // checksum processing. - t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) - } - } - if on, offset, err := p.Checksum(); err != nil { - t.Fatal(err) - } else { - t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset) - } - } -} diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go deleted file mode 100644 index a0b7d955..00000000 --- a/vendor/golang.org/x/net/ipv6/unicast_test.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6_test - -import ( - "bytes" - "net" - "os" - "runtime" - "testing" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -func TestPacketConnReadWriteUnicastUDP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - c, err := nettest.NewLocalPacketListener("udp6") - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - defer p.Close() - - dst := c.LocalAddr() - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - Src: net.IPv6loopback, - } - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) - if ifi != nil { - cm.IfIndex = ifi.Index - } - wb := []byte("HELLO-R-U-THERE") - - for i, toggle := range []bool{true, false, true} { - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - cm.HopLimit = i + 1 - if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, err := p.WriteTo(wb, &cm, dst); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatalf("got %v; want %v", n, len(wb)) - } - rb := make([]byte, 128) - if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, _, _, err := p.ReadFrom(rb); err != nil { - t.Fatal(err) - } else if !bytes.Equal(rb[:n], wb) { - t.Fatalf("got %v; want %v", rb[:n], wb) - } - } -} - -func TestPacketConnReadWriteUnicastICMP(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - if m, ok := nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") - if err != nil { - t.Fatal(err) - } - defer c.Close() - p := ipv6.NewPacketConn(c) - defer p.Close() - - dst, err := net.ResolveIPAddr("ip6", "::1") - if err != nil { - t.Fatal(err) - } - - pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP) - cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, - Src: net.IPv6loopback, - } - cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU - ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) - if ifi != nil { - cm.IfIndex = ifi.Index - } - - var f ipv6.ICMPFilter - f.SetAll(true) - f.Accept(ipv6.ICMPTypeEchoReply) - if err := p.SetICMPFilter(&f); err != nil { - t.Fatal(err) - } - - var psh []byte - for i, toggle := range []bool{true, false, true} { - if toggle { - psh = nil - if err := p.SetChecksum(true, 2); err != nil { - // Solaris never allows to modify - // ICMP properties. - if runtime.GOOS != "solaris" { - t.Fatal(err) - } - } - } else { - psh = pshicmp - // Some platforms never allow to disable the - // kernel checksum processing. 
- p.SetChecksum(false, -1) - } - wb, err := (&icmp.Message{ - Type: ipv6.ICMPTypeEchoRequest, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: i + 1, - Data: []byte("HELLO-R-U-THERE"), - }, - }).Marshal(psh) - if err != nil { - t.Fatal(err) - } - if err := p.SetControlMessage(cf, toggle); err != nil { - if nettest.ProtocolNotSupported(err) { - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } - cm.HopLimit = i + 1 - if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, err := p.WriteTo(wb, &cm, dst); err != nil { - t.Fatal(err) - } else if n != len(wb) { - t.Fatalf("got %v; want %v", n, len(wb)) - } - rb := make([]byte, 128) - if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { - t.Fatal(err) - } - if n, _, _, err := p.ReadFrom(rb); err != nil { - switch runtime.GOOS { - case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket - t.Logf("not supported on %s", runtime.GOOS) - continue - } - t.Fatal(err) - } else { - if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { - t.Fatal(err) - } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { - t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) - } - } - } -} diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go deleted file mode 100644 index e175dccf..00000000 --- a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ipv6_test - -import ( - "net" - "runtime" - "testing" - - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv6" -) - -func TestConnUnicastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - ln, err := net.Listen("tcp6", "[::1]:0") - if err != nil { - t.Fatal(err) - } - defer ln.Close() - - errc := make(chan error, 1) - go func() { - c, err := ln.Accept() - if err != nil { - errc <- err - return - } - errc <- c.Close() - }() - - c, err := net.Dial("tcp6", ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - testUnicastSocketOptions(t, ipv6.NewConn(c)) - - if err := <-errc; err != nil { - t.Errorf("server: %v", err) - } -} - -var packetConnUnicastSocketOptionTests = []struct { - net, proto, addr string -}{ - {"udp6", "", "[::1]:0"}, - {"ip6", ":ipv6-icmp", "::1"}, -} - -func TestPacketConnUnicastSocketOptions(t *testing.T) { - switch runtime.GOOS { - case "nacl", "plan9", "windows": - t.Skipf("not supported on %s", runtime.GOOS) - } - if !supportsIPv6 { - t.Skip("ipv6 is not supported") - } - - m, ok := nettest.SupportsRawIPSocket() - for _, tt := range packetConnUnicastSocketOptionTests { - if tt.net == "ip6" && !ok { - t.Log(m) - continue - } - c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - testUnicastSocketOptions(t, ipv6.NewPacketConn(c)) - } -} - -type testIPv6UnicastConn interface { - TrafficClass() (int, error) - SetTrafficClass(int) error - HopLimit() (int, error) - SetHopLimit(int) error -} - -func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) { - tclass := iana.DiffServCS0 | iana.NotECNTransport - if err := c.SetTrafficClass(tclass); err != nil { - switch runtime.GOOS { - case "darwin": // older darwin kernels don't support IPV6_TCLASS option - t.Logf("not supported on %s", runtime.GOOS) - goto next - } - t.Fatal(err) - } - if v, err := c.TrafficClass(); err != nil { - t.Fatal(err) - } else if v != tclass { - t.Fatalf("got %v; want %v", v, tclass) - } - -next: - hoplim := 255 - if err := c.SetHopLimit(hoplim); err != nil { - t.Fatal(err) - } - if v, err := c.HopLimit(); err != nil { - t.Fatal(err) - } else if v != hoplim { - t.Fatalf("got %v; want %v", v, hoplim) - } -} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go deleted file mode 100644 index 20f2b894..00000000 --- a/vendor/golang.org/x/net/lex/httplex/httplex.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package httplex contains rules around lexical matters of various -// HTTP-related specifications. -// -// This package is shared by the standard library (which vendors it) -// and x/net/http2. It comes with no API stability promise. 
-package httplex - -import ( - "net" - "strings" - "unicode/utf8" - - "golang.org/x/net/idna" -) - -var isTokenTable = [127]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) -} - -// HeaderValuesContainsToken reports whether any string in values -// contains the provided token, ASCII case-insensitively. -func HeaderValuesContainsToken(values []string, token string) bool { - for _, v := range values { - if headerValueContainsToken(v, token) { - return true - } - } - return false -} - -// isOWS reports whether b is an optional whitespace byte, as defined -// by RFC 7230 section 3.2.3. -func isOWS(b byte) bool { return b == ' ' || b == '\t' } - -// trimOWS returns x with all optional whitespace removes from the -// beginning and end. -func trimOWS(x string) string { - // TODO: consider using strings.Trim(x, " \t") instead, - // if and when it's fast enough. See issue 10292. - // But this ASCII-only code will probably always beat UTF-8 - // aware code. - for len(x) > 0 && isOWS(x[0]) { - x = x[1:] - } - for len(x) > 0 && isOWS(x[len(x)-1]) { - x = x[:len(x)-1] - } - return x -} - -// headerValueContainsToken reports whether v (assumed to be a -// 0#element, in the ABNF extension described in RFC 7230 section 7) -// contains token amongst its comma-separated tokens, ASCII -// case-insensitively. -func headerValueContainsToken(v string, token string) bool { - v = trimOWS(v) - if comma := strings.IndexByte(v, ','); comma != -1 { - return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) - } - return tokenEqual(v, token) -} - -// lowerASCII returns the ASCII lowercase version of b. -func lowerASCII(b byte) byte { - if 'A' <= b && b <= 'Z' { - return b + ('a' - 'A') - } - return b -} - -// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. -func tokenEqual(t1, t2 string) bool { - if len(t1) != len(t2) { - return false - } - for i, b := range t1 { - if b >= utf8.RuneSelf { - // No UTF-8 or non-ASCII allowed in tokens. 
- return false - } - if lowerASCII(byte(b)) != lowerASCII(t2[i]) { - return false - } - } - return true -} - -// isLWS reports whether b is linear white space, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) -func isLWS(b byte) bool { return b == ' ' || b == '\t' } - -// isCTL reports whether b is a control byte, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = -func isCTL(b byte) bool { - const del = 0x7f // a CTL - return b < ' ' || b == del -} - -// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. -// HTTP/2 imposes the additional restriction that uppercase ASCII -// letters are not allowed. -// -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -func ValidHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !IsTokenRune(r) { - return false - } - } - return true -} - -// ValidHostHeader reports whether h is a valid host header. -func ValidHostHeader(h string) bool { - // The latest spec is actually this: - // - // http://tools.ietf.org/html/rfc7230#section-5.4 - // Host = uri-host [ ":" port ] - // - // Where uri-host is: - // http://tools.ietf.org/html/rfc3986#section-3.2.2 - // - // But we're going to be much more lenient for now and just - // search for any byte that's not a valid byte in any of those - // expressions. - for i := 0; i < len(h); i++ { - if !validHostByte[h[i]] { - return false - } - } - return true -} - -// See the validHostHeader comment. -var validHostByte = [256]bool{ - '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, - '8': true, '9': true, - - 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, - 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, - 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, - 'y': true, 'z': true, - - 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, - 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, - 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, - 'Y': true, 'Z': true, - - '!': true, // sub-delims - '$': true, // sub-delims - '%': true, // pct-encoded (and used in IPv6 zones) - '&': true, // sub-delims - '(': true, // sub-delims - ')': true, // sub-delims - '*': true, // sub-delims - '+': true, // sub-delims - ',': true, // sub-delims - '-': true, // unreserved - '.': true, // unreserved - ':': true, // IPv6address + Host expression's optional port - ';': true, // sub-delims - '=': true, // sub-delims - '[': true, - '\'': true, // sub-delims - ']': true, - '_': true, // unreserved - '~': true, // unreserved -} - -// ValidHeaderFieldValue reports whether v is a valid "field-value" according to -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : -// -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = -// -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : -// -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = -// -// RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// 
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" -// -// http2 further says: "Similarly, HTTP/2 allows header field values -// that are not valid. While most of the values that can be encoded -// will not alter header field parsing, carriage return (CR, ASCII -// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII -// 0x0) might be exploited by an attacker if they are translated -// verbatim. Any request or response that contains a character not -// permitted in a header field value MUST be treated as malformed -// (Section 8.1.2.6). Valid characters are defined by the -// field-content ABNF rule in Section 3.2 of [RFC7230]." -// -// This function does not (yet?) properly handle the rejection of -// strings that begin or end with SP or HTAB. -func ValidHeaderFieldValue(v string) bool { - for i := 0; i < len(v); i++ { - b := v[i] - if isCTL(b) && !isLWS(b) { - return false - } - } - return true -} - -func isASCII(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} - -// PunycodeHostPort returns the IDNA Punycode version -// of the provided "host" or "host:port" string. -func PunycodeHostPort(v string) (string, error) { - if isASCII(v) { - return v, nil - } - - host, port, err := net.SplitHostPort(v) - if err != nil { - // The input 'v' argument was just a "host" argument, - // without a port. This error should not be returned - // to the caller. - host = v - port = "" - } - host, err = idna.ToASCII(host) - if err != nil { - // Non-UTF-8? Not representable in Punycode, in any - // case. - return "", err - } - if port == "" { - return host, nil - } - return net.JoinHostPort(host, port), nil -} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/vendor/golang.org/x/net/lex/httplex/httplex_test.go deleted file mode 100644 index f47adc93..00000000 --- a/vendor/golang.org/x/net/lex/httplex/httplex_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package httplex - -import ( - "testing" -) - -func isChar(c rune) bool { return c <= 127 } - -func isCtl(c rune) bool { return c <= 31 || c == 127 } - -func isSeparator(c rune) bool { - switch c { - case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': - return true - } - return false -} - -func TestIsToken(t *testing.T) { - for i := 0; i <= 130; i++ { - r := rune(i) - expected := isChar(r) && !isCtl(r) && !isSeparator(r) - if IsTokenRune(r) != expected { - t.Errorf("isToken(0x%x) = %v", r, !expected) - } - } -} - -func TestHeaderValuesContainsToken(t *testing.T) { - tests := []struct { - vals []string - token string - want bool - }{ - { - vals: []string{"foo"}, - token: "foo", - want: true, - }, - { - vals: []string{"bar", "foo"}, - token: "foo", - want: true, - }, - { - vals: []string{"foo"}, - token: "FOO", - want: true, - }, - { - vals: []string{"foo"}, - token: "bar", - want: false, - }, - { - vals: []string{" foo "}, - token: "FOO", - want: true, - }, - { - vals: []string{"foo,bar"}, - token: "FOO", - want: true, - }, - { - vals: []string{"bar,foo,bar"}, - token: "FOO", - want: true, - }, - { - vals: []string{"bar , foo"}, - token: "FOO", - want: true, - }, - { - vals: []string{"foo ,bar "}, - token: "FOO", - want: true, - }, - { - vals: []string{"bar, foo ,bar"}, - token: "FOO", - want: true, - }, - { - vals: []string{"bar , foo"}, - token: "FOO", - want: true, - }, - } - for _, tt := range tests { - got := HeaderValuesContainsToken(tt.vals, tt.token) - if got != tt.want { - t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want) - } - } -} - -func TestPunycodeHostPort(t *testing.T) { - tests := []struct { - in, want string - }{ - {"www.google.com", "www.google.com"}, - {"гофер.рф", "xn--c1ae0ajs.xn--p1ai"}, - {"bücher.de", "xn--bcher-kva.de"}, - {"bücher.de:8080", "xn--bcher-kva.de:8080"}, - {"[1::6]:8080", "[1::6]:8080"}, - } - for _, tt := range tests { - got, err := PunycodeHostPort(tt.in) - if tt.want != got || err != nil { - t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want) - } - } -} diff --git a/vendor/golang.org/x/net/lif/address.go b/vendor/golang.org/x/net/lif/address.go deleted file mode 100644 index afb957fd..00000000 --- a/vendor/golang.org/x/net/lif/address.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package lif - -import ( - "errors" - "unsafe" -) - -// An Addr represents an address associated with packet routing. -type Addr interface { - // Family returns an address family. - Family() int -} - -// An Inet4Addr represents an internet address for IPv4. -type Inet4Addr struct { - IP [4]byte // IP address - PrefixLen int // address prefix length -} - -// Family implements the Family method of Addr interface. -func (a *Inet4Addr) Family() int { return sysAF_INET } - -// An Inet6Addr represents an internet address for IPv6. -type Inet6Addr struct { - IP [16]byte // IP address - PrefixLen int // address prefix length - ZoneID int // zone identifier -} - -// Family implements the Family method of Addr interface. -func (a *Inet6Addr) Family() int { return sysAF_INET6 } - -// Addrs returns a list of interface addresses. -// -// The provided af must be an address family and name must be a data -// link name. The zero value of af or name means a wildcard. 
-func Addrs(af int, name string) ([]Addr, error) { - eps, err := newEndpoints(af) - if len(eps) == 0 { - return nil, err - } - defer func() { - for _, ep := range eps { - ep.close() - } - }() - lls, err := links(eps, name) - if len(lls) == 0 { - return nil, err - } - var as []Addr - for _, ll := range lls { - var lifr lifreq - for i := 0; i < len(ll.Name); i++ { - lifr.Name[i] = int8(ll.Name[i]) - } - for _, ep := range eps { - ioc := int64(sysSIOCGLIFADDR) - err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifr)) - if err != nil { - continue - } - sa := (*sockaddrStorage)(unsafe.Pointer(&lifr.Lifru[0])) - l := int(nativeEndian.Uint32(lifr.Lifru1[:4])) - if l == 0 { - continue - } - switch sa.Family { - case sysAF_INET: - a := &Inet4Addr{PrefixLen: l} - copy(a.IP[:], lifr.Lifru[4:8]) - as = append(as, a) - case sysAF_INET6: - a := &Inet6Addr{PrefixLen: l, ZoneID: int(nativeEndian.Uint32(lifr.Lifru[24:28]))} - copy(a.IP[:], lifr.Lifru[8:24]) - as = append(as, a) - } - } - } - return as, nil -} - -func parseLinkAddr(b []byte) ([]byte, error) { - nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) - l := 4 + nlen + alen + slen - if len(b) < l { - return nil, errors.New("invalid address") - } - b = b[4:] - var addr []byte - if nlen > 0 { - b = b[nlen:] - } - if alen > 0 { - addr = make([]byte, alen) - copy(addr, b[:alen]) - } - return addr, nil -} diff --git a/vendor/golang.org/x/net/lif/address_test.go b/vendor/golang.org/x/net/lif/address_test.go deleted file mode 100644 index a25f10b6..00000000 --- a/vendor/golang.org/x/net/lif/address_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package lif - -import ( - "fmt" - "testing" -) - -type addrFamily int - -func (af addrFamily) String() string { - switch af { - case sysAF_UNSPEC: - return "unspec" - case sysAF_INET: - return "inet4" - case sysAF_INET6: - return "inet6" - default: - return fmt.Sprintf("%d", af) - } -} - -const hexDigit = "0123456789abcdef" - -type llAddr []byte - -func (a llAddr) String() string { - if len(a) == 0 { - return "" - } - buf := make([]byte, 0, len(a)*3-1) - for i, b := range a { - if i > 0 { - buf = append(buf, ':') - } - buf = append(buf, hexDigit[b>>4]) - buf = append(buf, hexDigit[b&0xF]) - } - return string(buf) -} - -type ipAddr []byte - -func (a ipAddr) String() string { - if len(a) == 0 { - return "" - } - if len(a) == 4 { - return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) - } - if len(a) == 16 { - return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) - } - s := make([]byte, len(a)*2) - for i, tn := range a { - s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] - } - return string(s) -} - -func (a *Inet4Addr) String() string { - return fmt.Sprintf("(%s %s %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen) -} - -func (a *Inet6Addr) String() string { - return fmt.Sprintf("(%s %s %d %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen, a.ZoneID) -} - -type addrPack struct { - af int - as []Addr -} - -func addrPacks() ([]addrPack, error) { - var lastErr error - var aps []addrPack - for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { - as, err := Addrs(af, "") - if err != nil { - lastErr = err - continue - } - aps = append(aps, addrPack{af: af, as: as}) - } 
- return aps, lastErr -} - -func TestAddrs(t *testing.T) { - aps, err := addrPacks() - if len(aps) == 0 && err != nil { - t.Fatal(err) - } - lps, err := linkPacks() - if len(lps) == 0 && err != nil { - t.Fatal(err) - } - for _, lp := range lps { - n := 0 - for _, ll := range lp.lls { - as, err := Addrs(lp.af, ll.Name) - if err != nil { - t.Fatal(lp.af, ll.Name, err) - } - t.Logf("af=%s name=%s %v", addrFamily(lp.af), ll.Name, as) - n += len(as) - } - for _, ap := range aps { - if ap.af != lp.af { - continue - } - if n != len(ap.as) { - t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(ap.as)) - continue - } - } - } -} diff --git a/vendor/golang.org/x/net/lif/binary.go b/vendor/golang.org/x/net/lif/binary.go deleted file mode 100644 index 738a94f4..00000000 --- a/vendor/golang.org/x/net/lif/binary.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package lif - -// This file contains duplicates of encoding/binary package. -// -// This package is supposed to be used by the net package of standard -// library. Therefore the package set used in the package must be the -// same as net package. - -var ( - littleEndian binaryLittleEndian - bigEndian binaryBigEndian -) - -type binaryByteOrder interface { - Uint16([]byte) uint16 - Uint32([]byte) uint32 - Uint64([]byte) uint64 - PutUint16([]byte, uint16) - PutUint32([]byte, uint32) - PutUint64([]byte, uint64) -} - -type binaryLittleEndian struct{} - -func (binaryLittleEndian) Uint16(b []byte) uint16 { - _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 - return uint16(b[0]) | uint16(b[1])<<8 -} - -func (binaryLittleEndian) PutUint16(b []byte, v uint16) { - _ = b[1] // early bounds check to guarantee safety of writes below - b[0] = byte(v) - b[1] = byte(v >> 8) -} - -func (binaryLittleEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func (binaryLittleEndian) PutUint32(b []byte, v uint32) { - _ = b[3] // early bounds check to guarantee safety of writes below - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) -} - -func (binaryLittleEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func (binaryLittleEndian) PutUint64(b []byte, v uint64) { - _ = b[7] // early bounds check to guarantee safety of writes below - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) - b[4] = byte(v >> 32) - b[5] = byte(v >> 40) - b[6] = byte(v >> 48) - b[7] = byte(v >> 56) -} - -type binaryBigEndian struct{} - -func (binaryBigEndian) Uint16(b []byte) uint16 { - _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 - return uint16(b[1]) | uint16(b[0])<<8 -} - -func (binaryBigEndian) PutUint16(b []byte, v uint16) { - _ = b[1] // early bounds check to guarantee safety of writes below - b[0] = byte(v >> 8) - b[1] = byte(v) -} - -func (binaryBigEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -} - -func (binaryBigEndian) 
PutUint32(b []byte, v uint32) { - _ = b[3] // early bounds check to guarantee safety of writes below - b[0] = byte(v >> 24) - b[1] = byte(v >> 16) - b[2] = byte(v >> 8) - b[3] = byte(v) -} - -func (binaryBigEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 -} - -func (binaryBigEndian) PutUint64(b []byte, v uint64) { - _ = b[7] // early bounds check to guarantee safety of writes below - b[0] = byte(v >> 56) - b[1] = byte(v >> 48) - b[2] = byte(v >> 40) - b[3] = byte(v >> 32) - b[4] = byte(v >> 24) - b[5] = byte(v >> 16) - b[6] = byte(v >> 8) - b[7] = byte(v) -} diff --git a/vendor/golang.org/x/net/lif/defs_solaris.go b/vendor/golang.org/x/net/lif/defs_solaris.go deleted file mode 100644 index 02c19981..00000000 --- a/vendor/golang.org/x/net/lif/defs_solaris.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package lif - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_INET6 = C.AF_INET6 - - sysSOCK_DGRAM = C.SOCK_DGRAM -) - -type sockaddrStorage C.struct_sockaddr_storage - -const ( - sysLIFC_NOXMIT = C.LIFC_NOXMIT - sysLIFC_EXTERNAL_SOURCE = C.LIFC_EXTERNAL_SOURCE - sysLIFC_TEMPORARY = C.LIFC_TEMPORARY - sysLIFC_ALLZONES = C.LIFC_ALLZONES - sysLIFC_UNDER_IPMP = C.LIFC_UNDER_IPMP - sysLIFC_ENABLED = C.LIFC_ENABLED - - sysSIOCGLIFADDR = C.SIOCGLIFADDR - sysSIOCGLIFDSTADDR = C.SIOCGLIFDSTADDR - sysSIOCGLIFFLAGS = C.SIOCGLIFFLAGS - sysSIOCGLIFMTU = C.SIOCGLIFMTU - sysSIOCGLIFNETMASK = C.SIOCGLIFNETMASK - sysSIOCGLIFMETRIC = C.SIOCGLIFMETRIC - sysSIOCGLIFNUM = C.SIOCGLIFNUM - sysSIOCGLIFINDEX = C.SIOCGLIFINDEX - sysSIOCGLIFSUBNET = C.SIOCGLIFSUBNET - sysSIOCGLIFLNKINFO = C.SIOCGLIFLNKINFO - sysSIOCGLIFCONF = C.SIOCGLIFCONF - sysSIOCGLIFHWADDR = C.SIOCGLIFHWADDR -) - -const ( - sysIFF_UP = C.IFF_UP - sysIFF_BROADCAST = C.IFF_BROADCAST - sysIFF_DEBUG = C.IFF_DEBUG - sysIFF_LOOPBACK = C.IFF_LOOPBACK - sysIFF_POINTOPOINT = C.IFF_POINTOPOINT - sysIFF_NOTRAILERS = C.IFF_NOTRAILERS - sysIFF_RUNNING = C.IFF_RUNNING - sysIFF_NOARP = C.IFF_NOARP - sysIFF_PROMISC = C.IFF_PROMISC - sysIFF_ALLMULTI = C.IFF_ALLMULTI - sysIFF_INTELLIGENT = C.IFF_INTELLIGENT - sysIFF_MULTICAST = C.IFF_MULTICAST - sysIFF_MULTI_BCAST = C.IFF_MULTI_BCAST - sysIFF_UNNUMBERED = C.IFF_UNNUMBERED - sysIFF_PRIVATE = C.IFF_PRIVATE -) - -const ( - sizeofLifnum = C.sizeof_struct_lifnum - sizeofLifreq = C.sizeof_struct_lifreq - sizeofLifconf = C.sizeof_struct_lifconf - sizeofLifIfinfoReq = C.sizeof_struct_lif_ifinfo_req -) - -type lifnum C.struct_lifnum - -type lifreq C.struct_lifreq - -type lifconf C.struct_lifconf - -type lifIfinfoReq C.struct_lif_ifinfo_req - -const ( - sysIFT_IPV4 = C.IFT_IPV4 - sysIFT_IPV6 = C.IFT_IPV6 - sysIFT_6TO4 = C.IFT_6TO4 -) diff --git a/vendor/golang.org/x/net/lif/lif.go b/vendor/golang.org/x/net/lif/lif.go deleted file mode 100644 index 6e81f81f..00000000 --- a/vendor/golang.org/x/net/lif/lif.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -// Package lif provides basic functions for the manipulation of -// logical network interfaces and interface addresses on Solaris. -// -// The package supports Solaris 11 or above. -package lif - -import "syscall" - -type endpoint struct { - af int - s uintptr -} - -func (ep *endpoint) close() error { - return syscall.Close(int(ep.s)) -} - -func newEndpoints(af int) ([]endpoint, error) { - var lastErr error - var eps []endpoint - afs := []int{sysAF_INET, sysAF_INET6} - if af != sysAF_UNSPEC { - afs = []int{af} - } - for _, af := range afs { - s, err := syscall.Socket(af, sysSOCK_DGRAM, 0) - if err != nil { - lastErr = err - continue - } - eps = append(eps, endpoint{af: af, s: uintptr(s)}) - } - if len(eps) == 0 { - return nil, lastErr - } - return eps, nil -} diff --git a/vendor/golang.org/x/net/lif/link.go b/vendor/golang.org/x/net/lif/link.go deleted file mode 100644 index 913a53e1..00000000 --- a/vendor/golang.org/x/net/lif/link.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package lif - -import "unsafe" - -// A Link represents logical data link information. -// -// It also represents base information for logical network interface. -// On Solaris, each logical network interface represents network layer -// adjacency information and the interface has a only single network -// address or address pair for tunneling. It's usual that multiple -// logical network interfaces share the same logical data link. -type Link struct { - Name string // name, equivalent to IP interface name - Index int // index, equivalent to IP interface index - Type int // type - Flags int // flags - MTU int // maximum transmission unit, basically link MTU but may differ between IP address families - Addr []byte // address -} - -func (ll *Link) fetch(s uintptr) { - var lifr lifreq - for i := 0; i < len(ll.Name); i++ { - lifr.Name[i] = int8(ll.Name[i]) - } - ioc := int64(sysSIOCGLIFINDEX) - if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { - ll.Index = int(nativeEndian.Uint32(lifr.Lifru[:4])) - } - ioc = int64(sysSIOCGLIFFLAGS) - if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { - ll.Flags = int(nativeEndian.Uint64(lifr.Lifru[:8])) - } - ioc = int64(sysSIOCGLIFMTU) - if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { - ll.MTU = int(nativeEndian.Uint32(lifr.Lifru[:4])) - } - switch ll.Type { - case sysIFT_IPV4, sysIFT_IPV6, sysIFT_6TO4: - default: - ioc = int64(sysSIOCGLIFHWADDR) - if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { - ll.Addr, _ = parseLinkAddr(lifr.Lifru[4:]) - } - } -} - -// Links returns a list of logical data links. -// -// The provided af must be an address family and name must be a data -// link name. The zero value of af or name means a wildcard. 
-func Links(af int, name string) ([]Link, error) { - eps, err := newEndpoints(af) - if len(eps) == 0 { - return nil, err - } - defer func() { - for _, ep := range eps { - ep.close() - } - }() - return links(eps, name) -} - -func links(eps []endpoint, name string) ([]Link, error) { - var lls []Link - lifn := lifnum{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} - lifc := lifconf{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} - for _, ep := range eps { - lifn.Family = uint16(ep.af) - ioc := int64(sysSIOCGLIFNUM) - if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifn)); err != nil { - continue - } - if lifn.Count == 0 { - continue - } - b := make([]byte, lifn.Count*sizeofLifreq) - lifc.Family = uint16(ep.af) - lifc.Len = lifn.Count * sizeofLifreq - if len(lifc.Lifcu) == 8 { - nativeEndian.PutUint64(lifc.Lifcu[:], uint64(uintptr(unsafe.Pointer(&b[0])))) - } else { - nativeEndian.PutUint32(lifc.Lifcu[:], uint32(uintptr(unsafe.Pointer(&b[0])))) - } - ioc = int64(sysSIOCGLIFCONF) - if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifc)); err != nil { - continue - } - nb := make([]byte, 32) // see LIFNAMSIZ in net/if.h - for i := 0; i < int(lifn.Count); i++ { - lifr := (*lifreq)(unsafe.Pointer(&b[i*sizeofLifreq])) - for i := 0; i < 32; i++ { - if lifr.Name[i] == 0 { - nb = nb[:i] - break - } - nb[i] = byte(lifr.Name[i]) - } - llname := string(nb) - nb = nb[:32] - if isDupLink(lls, llname) || name != "" && name != llname { - continue - } - ll := Link{Name: llname, Type: int(lifr.Type)} - ll.fetch(ep.s) - lls = append(lls, ll) - } - } - return lls, nil -} - -func isDupLink(lls []Link, name string) bool { - for _, ll := range lls { - if ll.Name == name { - return true - } - } - return false -} diff --git a/vendor/golang.org/x/net/lif/link_test.go b/vendor/golang.org/x/net/lif/link_test.go deleted file mode 100644 index 0cb9b95c..00000000 --- a/vendor/golang.org/x/net/lif/link_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build solaris - -package lif - -import ( - "fmt" - "testing" -) - -func (ll *Link) String() string { - return fmt.Sprintf("name=%s index=%d type=%d flags=%#x mtu=%d addr=%v", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr)) -} - -type linkPack struct { - af int - lls []Link -} - -func linkPacks() ([]linkPack, error) { - var lastErr error - var lps []linkPack - for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { - lls, err := Links(af, "") - if err != nil { - lastErr = err - continue - } - lps = append(lps, linkPack{af: af, lls: lls}) - } - return lps, lastErr -} - -func TestLinks(t *testing.T) { - lps, err := linkPacks() - if len(lps) == 0 && err != nil { - t.Fatal(err) - } - for _, lp := range lps { - n := 0 - for _, sll := range lp.lls { - lls, err := Links(lp.af, sll.Name) - if err != nil { - t.Fatal(lp.af, sll.Name, err) - } - for _, ll := range lls { - if ll.Name != sll.Name || ll.Index != sll.Index { - t.Errorf("af=%s got %v; want %v", addrFamily(lp.af), &ll, &sll) - continue - } - t.Logf("af=%s name=%s %v", addrFamily(lp.af), sll.Name, &ll) - n++ - } - } - if n != len(lp.lls) { - t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(lp.lls)) - continue - } - } -} diff --git a/vendor/golang.org/x/net/lif/sys.go b/vendor/golang.org/x/net/lif/sys.go deleted file mode 100644 index c896041b..00000000 --- a/vendor/golang.org/x/net/lif/sys.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package lif - -import "unsafe" - -var nativeEndian binaryByteOrder - -func init() { - i := uint32(1) - b := (*[4]byte)(unsafe.Pointer(&i)) - if b[0] == 1 { - nativeEndian = littleEndian - } else { - nativeEndian = bigEndian - } -} diff --git a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s deleted file mode 100644 index 39d76af7..00000000 --- a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -TEXT ·sysvicall6(SB),NOSPLIT,$0-88 - JMP syscall·sysvicall6(SB) diff --git a/vendor/golang.org/x/net/lif/syscall.go b/vendor/golang.org/x/net/lif/syscall.go deleted file mode 100644 index aadab2e1..00000000 --- a/vendor/golang.org/x/net/lif/syscall.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build solaris - -package lif - -import ( - "syscall" - "unsafe" -) - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - -//go:linkname procIoctl libc_ioctl - -var procIoctl uintptr - -func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) - -func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { - _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) - if errno != 0 { - return error(errno) - } - return nil -} diff --git a/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go deleted file mode 100644 index b5e999be..00000000 --- a/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go +++ /dev/null @@ -1,103 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_solaris.go - -package lif - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1a - - sysSOCK_DGRAM = 0x1 -) - -type sockaddrStorage struct { - Family uint16 - X_ss_pad1 [6]int8 - X_ss_align float64 - X_ss_pad2 [240]int8 -} - -const ( - sysLIFC_NOXMIT = 0x1 - sysLIFC_EXTERNAL_SOURCE = 0x2 - sysLIFC_TEMPORARY = 0x4 - sysLIFC_ALLZONES = 0x8 - sysLIFC_UNDER_IPMP = 0x10 - sysLIFC_ENABLED = 0x20 - - sysSIOCGLIFADDR = -0x3f87968f - sysSIOCGLIFDSTADDR = -0x3f87968d - sysSIOCGLIFFLAGS = -0x3f87968b - sysSIOCGLIFMTU = -0x3f879686 - sysSIOCGLIFNETMASK = -0x3f879683 - sysSIOCGLIFMETRIC = -0x3f879681 - sysSIOCGLIFNUM = -0x3ff3967e - sysSIOCGLIFINDEX = -0x3f87967b - sysSIOCGLIFSUBNET = -0x3f879676 - sysSIOCGLIFLNKINFO = -0x3f879674 - sysSIOCGLIFCONF = -0x3fef965b - sysSIOCGLIFHWADDR = -0x3f879640 -) - -const ( - sysIFF_UP = 0x1 - sysIFF_BROADCAST = 0x2 - sysIFF_DEBUG = 0x4 - sysIFF_LOOPBACK = 0x8 - sysIFF_POINTOPOINT = 0x10 - sysIFF_NOTRAILERS = 0x20 - sysIFF_RUNNING = 0x40 - sysIFF_NOARP = 0x80 - sysIFF_PROMISC = 0x100 - sysIFF_ALLMULTI = 0x200 - sysIFF_INTELLIGENT = 0x400 - sysIFF_MULTICAST = 0x800 - sysIFF_MULTI_BCAST = 0x1000 - sysIFF_UNNUMBERED = 0x2000 - sysIFF_PRIVATE = 0x8000 -) - -const ( - sizeofLifnum = 0xc - sizeofLifreq = 0x178 - sizeofLifconf = 0x18 - sizeofLifIfinfoReq = 0x10 -) - -type lifnum struct { - Family uint16 - Pad_cgo_0 [2]byte - Flags int32 - Count int32 -} - -type lifreq struct { - Name [32]int8 - Lifru1 [4]byte - Type uint32 - Lifru [336]byte -} - -type lifconf struct { - Family uint16 - Pad_cgo_0 [2]byte - Flags int32 - Len int32 - Pad_cgo_1 [4]byte - Lifcu [8]byte -} - -type lifIfinfoReq struct { - Maxhops uint8 - Pad_cgo_0 [3]byte - Reachtime uint32 - Reachretrans uint32 - Maxmtu uint32 -} - -const ( - sysIFT_IPV4 = 0xc8 - sysIFT_IPV6 = 0xc9 - sysIFT_6TO4 = 0xca -) diff --git a/vendor/golang.org/x/net/nettest/conntest.go b/vendor/golang.org/x/net/nettest/conntest.go deleted file mode 100644 index 5bd3a8c6..00000000 --- a/vendor/golang.org/x/net/nettest/conntest.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package nettest provides utilities for network testing. -package nettest - -import ( - "bytes" - "encoding/binary" - "io" - "io/ioutil" - "math/rand" - "net" - "runtime" - "sync" - "testing" - "time" -) - -var ( - aLongTimeAgo = time.Unix(233431200, 0) - neverTimeout = time.Time{} -) - -// MakePipe creates a connection between two endpoints and returns the pair -// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa. 
-// The stop function closes all resources, including c1, c2, and the underlying -// net.Listener (if there is one), and should not be nil. -type MakePipe func() (c1, c2 net.Conn, stop func(), err error) - -// TestConn tests that a net.Conn implementation properly satisfies the interface. -// The tests should not produce any false positives, but may experience -// false negatives. Thus, some issues may only be detected when the test is -// run multiple times. For maximal effectiveness, run the tests under the -// race detector. -func TestConn(t *testing.T, mp MakePipe) { - testConn(t, mp) -} - -type connTester func(t *testing.T, c1, c2 net.Conn) - -func timeoutWrapper(t *testing.T, mp MakePipe, f connTester) { - c1, c2, stop, err := mp() - if err != nil { - t.Fatalf("unable to make pipe: %v", err) - } - var once sync.Once - defer once.Do(func() { stop() }) - timer := time.AfterFunc(time.Minute, func() { - once.Do(func() { - t.Error("test timed out; terminating pipe") - stop() - }) - }) - defer timer.Stop() - f(t, c1, c2) -} - -// testBasicIO tests that the data sent on c1 is properly received on c2. -func testBasicIO(t *testing.T, c1, c2 net.Conn) { - want := make([]byte, 1<<20) - rand.New(rand.NewSource(0)).Read(want) - - dataCh := make(chan []byte) - go func() { - rd := bytes.NewReader(want) - if err := chunkedCopy(c1, rd); err != nil { - t.Errorf("unexpected c1.Write error: %v", err) - } - if err := c1.Close(); err != nil { - t.Errorf("unexpected c1.Close error: %v", err) - } - }() - - go func() { - wr := new(bytes.Buffer) - if err := chunkedCopy(wr, c2); err != nil { - t.Errorf("unexpected c2.Read error: %v", err) - } - if err := c2.Close(); err != nil { - t.Errorf("unexpected c2.Close error: %v", err) - } - dataCh <- wr.Bytes() - }() - - if got := <-dataCh; !bytes.Equal(got, want) { - t.Errorf("transmitted data differs") - } -} - -// testPingPong tests that the two endpoints can synchronously send data to -// each other in a typical request-response pattern. -func testPingPong(t *testing.T, c1, c2 net.Conn) { - var wg sync.WaitGroup - defer wg.Wait() - - pingPonger := func(c net.Conn) { - defer wg.Done() - buf := make([]byte, 8) - var prev uint64 - for { - if _, err := io.ReadFull(c, buf); err != nil { - if err == io.EOF { - break - } - t.Errorf("unexpected Read error: %v", err) - } - - v := binary.LittleEndian.Uint64(buf) - binary.LittleEndian.PutUint64(buf, v+1) - if prev != 0 && prev+2 != v { - t.Errorf("mismatching value: got %d, want %d", v, prev+2) - } - prev = v - if v == 1000 { - break - } - - if _, err := c.Write(buf); err != nil { - t.Errorf("unexpected Write error: %v", err) - break - } - } - if err := c.Close(); err != nil { - t.Errorf("unexpected Close error: %v", err) - } - } - - wg.Add(2) - go pingPonger(c1) - go pingPonger(c2) - - // Start off the chain reaction. - if _, err := c1.Write(make([]byte, 8)); err != nil { - t.Errorf("unexpected c1.Write error: %v", err) - } -} - -// testRacyRead tests that it is safe to mutate the input Read buffer -// immediately after cancelation has occurred. 
-func testRacyRead(t *testing.T, c1, c2 net.Conn) { - go chunkedCopy(c2, rand.New(rand.NewSource(0))) - - var wg sync.WaitGroup - defer wg.Wait() - - c1.SetReadDeadline(time.Now().Add(time.Millisecond)) - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - b1 := make([]byte, 1024) - b2 := make([]byte, 1024) - for j := 0; j < 100; j++ { - _, err := c1.Read(b1) - copy(b1, b2) // Mutate b1 to trigger potential race - if err != nil { - checkForTimeoutError(t, err) - c1.SetReadDeadline(time.Now().Add(time.Millisecond)) - } - } - }() - } -} - -// testRacyWrite tests that it is safe to mutate the input Write buffer -// immediately after cancelation has occurred. -func testRacyWrite(t *testing.T, c1, c2 net.Conn) { - go chunkedCopy(ioutil.Discard, c2) - - var wg sync.WaitGroup - defer wg.Wait() - - c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - b1 := make([]byte, 1024) - b2 := make([]byte, 1024) - for j := 0; j < 100; j++ { - _, err := c1.Write(b1) - copy(b1, b2) // Mutate b1 to trigger potential race - if err != nil { - checkForTimeoutError(t, err) - c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) - } - } - }() - } -} - -// testReadTimeout tests that Read timeouts do not affect Write. -func testReadTimeout(t *testing.T, c1, c2 net.Conn) { - go chunkedCopy(ioutil.Discard, c2) - - c1.SetReadDeadline(aLongTimeAgo) - _, err := c1.Read(make([]byte, 1024)) - checkForTimeoutError(t, err) - if _, err := c1.Write(make([]byte, 1024)); err != nil { - t.Errorf("unexpected Write error: %v", err) - } -} - -// testWriteTimeout tests that Write timeouts do not affect Read. -func testWriteTimeout(t *testing.T, c1, c2 net.Conn) { - go chunkedCopy(c2, rand.New(rand.NewSource(0))) - - c1.SetWriteDeadline(aLongTimeAgo) - _, err := c1.Write(make([]byte, 1024)) - checkForTimeoutError(t, err) - if _, err := c1.Read(make([]byte, 1024)); err != nil { - t.Errorf("unexpected Read error: %v", err) - } -} - -// testPastTimeout tests that a deadline set in the past immediately times out -// Read and Write requests. -func testPastTimeout(t *testing.T, c1, c2 net.Conn) { - go chunkedCopy(c2, c2) - - testRoundtrip(t, c1) - - c1.SetDeadline(aLongTimeAgo) - n, err := c1.Write(make([]byte, 1024)) - if n != 0 { - t.Errorf("unexpected Write count: got %d, want 0", n) - } - checkForTimeoutError(t, err) - n, err = c1.Read(make([]byte, 1024)) - if n != 0 { - t.Errorf("unexpected Read count: got %d, want 0", n) - } - checkForTimeoutError(t, err) - - testRoundtrip(t, c1) -} - -// testPresentTimeout tests that a deadline set while there are pending -// Read and Write operations immediately times out those operations. 
-func testPresentTimeout(t *testing.T, c1, c2 net.Conn) { - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(3) - - deadlineSet := make(chan bool, 1) - go func() { - defer wg.Done() - time.Sleep(100 * time.Millisecond) - deadlineSet <- true - c1.SetReadDeadline(aLongTimeAgo) - c1.SetWriteDeadline(aLongTimeAgo) - }() - go func() { - defer wg.Done() - n, err := c1.Read(make([]byte, 1024)) - if n != 0 { - t.Errorf("unexpected Read count: got %d, want 0", n) - } - checkForTimeoutError(t, err) - if len(deadlineSet) == 0 { - t.Error("Read timed out before deadline is set") - } - }() - go func() { - defer wg.Done() - var err error - for err == nil { - _, err = c1.Write(make([]byte, 1024)) - } - checkForTimeoutError(t, err) - if len(deadlineSet) == 0 { - t.Error("Write timed out before deadline is set") - } - }() -} - -// testFutureTimeout tests that a future deadline will eventually time out -// Read and Write operations. -func testFutureTimeout(t *testing.T, c1, c2 net.Conn) { - var wg sync.WaitGroup - wg.Add(2) - - c1.SetDeadline(time.Now().Add(100 * time.Millisecond)) - go func() { - defer wg.Done() - _, err := c1.Read(make([]byte, 1024)) - checkForTimeoutError(t, err) - }() - go func() { - defer wg.Done() - var err error - for err == nil { - _, err = c1.Write(make([]byte, 1024)) - } - checkForTimeoutError(t, err) - }() - wg.Wait() - - go chunkedCopy(c2, c2) - resyncConn(t, c1) - testRoundtrip(t, c1) -} - -// testCloseTimeout tests that calling Close immediately times out pending -// Read and Write operations. -func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { - go chunkedCopy(c2, c2) - - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(3) - - // Test for cancelation upon connection closure. - c1.SetDeadline(neverTimeout) - go func() { - defer wg.Done() - time.Sleep(100 * time.Millisecond) - c1.Close() - }() - go func() { - defer wg.Done() - var err error - buf := make([]byte, 1024) - for err == nil { - _, err = c1.Read(buf) - } - }() - go func() { - defer wg.Done() - var err error - buf := make([]byte, 1024) - for err == nil { - _, err = c1.Write(buf) - } - }() -} - -// testConcurrentMethods tests that the methods of net.Conn can safely -// be called concurrently. -func testConcurrentMethods(t *testing.T, c1, c2 net.Conn) { - if runtime.GOOS == "plan9" { - t.Skip("skipping on plan9; see https://golang.org/issue/20489") - } - go chunkedCopy(c2, c2) - - // The results of the calls may be nonsensical, but this should - // not trigger a race detector warning. - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(7) - go func() { - defer wg.Done() - c1.Read(make([]byte, 1024)) - }() - go func() { - defer wg.Done() - c1.Write(make([]byte, 1024)) - }() - go func() { - defer wg.Done() - c1.SetDeadline(time.Now().Add(10 * time.Millisecond)) - }() - go func() { - defer wg.Done() - c1.SetReadDeadline(aLongTimeAgo) - }() - go func() { - defer wg.Done() - c1.SetWriteDeadline(aLongTimeAgo) - }() - go func() { - defer wg.Done() - c1.LocalAddr() - }() - go func() { - defer wg.Done() - c1.RemoteAddr() - }() - } - wg.Wait() // At worst, the deadline is set 10ms into the future - - resyncConn(t, c1) - testRoundtrip(t, c1) -} - -// checkForTimeoutError checks that the error satisfies the Error interface -// and that Timeout returns true. 
-func checkForTimeoutError(t *testing.T, err error) { - if nerr, ok := err.(net.Error); ok { - if !nerr.Timeout() { - t.Errorf("err.Timeout() = false, want true") - } - } else { - t.Errorf("got %T, want net.Error", err) - } -} - -// testRoundtrip writes something into c and reads it back. -// It assumes that everything written into c is echoed back to itself. -func testRoundtrip(t *testing.T, c net.Conn) { - if err := c.SetDeadline(neverTimeout); err != nil { - t.Errorf("roundtrip SetDeadline error: %v", err) - } - - const s = "Hello, world!" - buf := []byte(s) - if _, err := c.Write(buf); err != nil { - t.Errorf("roundtrip Write error: %v", err) - } - if _, err := io.ReadFull(c, buf); err != nil { - t.Errorf("roundtrip Read error: %v", err) - } - if string(buf) != s { - t.Errorf("roundtrip data mismatch: got %q, want %q", buf, s) - } -} - -// resyncConn resynchronizes the connection into a sane state. -// It assumes that everything written into c is echoed back to itself. -// It assumes that 0xff is not currently on the wire or in the read buffer. -func resyncConn(t *testing.T, c net.Conn) { - c.SetDeadline(neverTimeout) - errCh := make(chan error) - go func() { - _, err := c.Write([]byte{0xff}) - errCh <- err - }() - buf := make([]byte, 1024) - for { - n, err := c.Read(buf) - if n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 { - break - } - if err != nil { - t.Errorf("unexpected Read error: %v", err) - break - } - } - if err := <-errCh; err != nil { - t.Errorf("unexpected Write error: %v", err) - } -} - -// chunkedCopy copies from r to w in fixed-width chunks to avoid -// causing a Write that exceeds the maximum packet size for packet-based -// connections like "unixpacket". -// We assume that the maximum packet size is at least 1024. -func chunkedCopy(w io.Writer, r io.Reader) error { - b := make([]byte, 1024) - _, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b) - return err -} diff --git a/vendor/golang.org/x/net/nettest/conntest_go16.go b/vendor/golang.org/x/net/nettest/conntest_go16.go deleted file mode 100644 index 4cbf48e3..00000000 --- a/vendor/golang.org/x/net/nettest/conntest_go16.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package nettest - -import "testing" - -func testConn(t *testing.T, mp MakePipe) { - // Avoid using subtests on Go 1.6 and below. - timeoutWrapper(t, mp, testBasicIO) - timeoutWrapper(t, mp, testPingPong) - timeoutWrapper(t, mp, testRacyRead) - timeoutWrapper(t, mp, testRacyWrite) - timeoutWrapper(t, mp, testReadTimeout) - timeoutWrapper(t, mp, testWriteTimeout) - timeoutWrapper(t, mp, testPastTimeout) - timeoutWrapper(t, mp, testPresentTimeout) - timeoutWrapper(t, mp, testFutureTimeout) - timeoutWrapper(t, mp, testCloseTimeout) - timeoutWrapper(t, mp, testConcurrentMethods) -} diff --git a/vendor/golang.org/x/net/nettest/conntest_go17.go b/vendor/golang.org/x/net/nettest/conntest_go17.go deleted file mode 100644 index fa039f03..00000000 --- a/vendor/golang.org/x/net/nettest/conntest_go17.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package nettest - -import "testing" - -func testConn(t *testing.T, mp MakePipe) { - // Use subtests on Go 1.7 and above since it is better organized. 
- t.Run("BasicIO", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) }) - t.Run("PingPong", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) }) - t.Run("RacyRead", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) }) - t.Run("RacyWrite", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) }) - t.Run("ReadTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) }) - t.Run("WriteTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) }) - t.Run("PastTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) }) - t.Run("PresentTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) }) - t.Run("FutureTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) }) - t.Run("CloseTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) }) - t.Run("ConcurrentMethods", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) }) -} diff --git a/vendor/golang.org/x/net/nettest/conntest_test.go b/vendor/golang.org/x/net/nettest/conntest_test.go deleted file mode 100644 index 9f9453fb..00000000 --- a/vendor/golang.org/x/net/nettest/conntest_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package nettest - -import ( - "net" - "os" - "runtime" - "testing" - - "golang.org/x/net/internal/nettest" -) - -func TestTestConn(t *testing.T) { - tests := []struct{ name, network string }{ - {"TCP", "tcp"}, - {"UnixPipe", "unix"}, - {"UnixPacketPipe", "unixpacket"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if !nettest.TestableNetwork(tt.network) { - t.Skipf("not supported on %s", runtime.GOOS) - } - - mp := func() (c1, c2 net.Conn, stop func(), err error) { - ln, err := nettest.NewLocalListener(tt.network) - if err != nil { - return nil, nil, nil, err - } - - // Start a connection between two endpoints. - var err1, err2 error - done := make(chan bool) - go func() { - c2, err2 = ln.Accept() - close(done) - }() - c1, err1 = net.Dial(ln.Addr().Network(), ln.Addr().String()) - <-done - - stop = func() { - if err1 == nil { - c1.Close() - } - if err2 == nil { - c2.Close() - } - ln.Close() - switch tt.network { - case "unix", "unixpacket": - os.Remove(ln.Addr().String()) - } - } - - switch { - case err1 != nil: - stop() - return nil, nil, nil, err1 - case err2 != nil: - stop() - return nil, nil, nil, err2 - default: - return c1, c2, stop, nil - } - } - - TestConn(t, mp) - }) - } -} diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go deleted file mode 100644 index 56f43bf6..00000000 --- a/vendor/golang.org/x/net/netutil/listen.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package netutil provides network utility functions, complementing the more -// common ones in the net package. -package netutil // import "golang.org/x/net/netutil" - -import ( - "net" - "sync" -) - -// LimitListener returns a Listener that accepts at most n simultaneous -// connections from the provided Listener. 
-func LimitListener(l net.Listener, n int) net.Listener { - return &limitListener{l, make(chan struct{}, n)} -} - -type limitListener struct { - net.Listener - sem chan struct{} -} - -func (l *limitListener) acquire() { l.sem <- struct{}{} } -func (l *limitListener) release() { <-l.sem } - -func (l *limitListener) Accept() (net.Conn, error) { - l.acquire() - c, err := l.Listener.Accept() - if err != nil { - l.release() - return nil, err - } - return &limitListenerConn{Conn: c, release: l.release}, nil -} - -type limitListenerConn struct { - net.Conn - releaseOnce sync.Once - release func() -} - -func (l *limitListenerConn) Close() error { - err := l.Conn.Close() - l.releaseOnce.Do(l.release) - return err -} diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go deleted file mode 100644 index 5e07d7be..00000000 --- a/vendor/golang.org/x/net/netutil/listen_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package netutil - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "sync" - "sync/atomic" - "testing" - "time" - - "golang.org/x/net/internal/nettest" -) - -func TestLimitListener(t *testing.T) { - const max = 5 - attempts := (nettest.MaxOpenFiles() - max) / 2 - if attempts > 256 { // maximum length of accept queue is 128 by default - attempts = 256 - } - - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - l = LimitListener(l, max) - - var open int32 - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if n := atomic.AddInt32(&open, 1); n > max { - t.Errorf("%d open connections, want <= %d", n, max) - } - defer atomic.AddInt32(&open, -1) - time.Sleep(10 * time.Millisecond) - fmt.Fprint(w, "some body") - })) - - var wg sync.WaitGroup - var failed int32 - for i := 0; i < attempts; i++ { - wg.Add(1) - go func() { - defer wg.Done() - c := http.Client{Timeout: 3 * time.Second} - r, err := c.Get("http://" + l.Addr().String()) - if err != nil { - t.Log(err) - atomic.AddInt32(&failed, 1) - return - } - defer r.Body.Close() - io.Copy(ioutil.Discard, r.Body) - }() - } - wg.Wait() - - // We expect some Gets to fail as the kernel's accept queue is filled, - // but most should succeed. - if int(failed) >= attempts/2 { - t.Errorf("%d requests failed within %d attempts", failed, attempts) - } -} - -type errorListener struct { - net.Listener -} - -func (errorListener) Accept() (net.Conn, error) { - return nil, errFake -} - -var errFake = errors.New("fake error from errorListener") - -// This used to hang. -func TestLimitListenerError(t *testing.T) { - donec := make(chan bool, 1) - go func() { - const n = 2 - ll := LimitListener(errorListener{}, n) - for i := 0; i < n+1; i++ { - _, err := ll.Accept() - if err != errFake { - t.Fatalf("Accept error = %v; want errFake", err) - } - } - donec <- true - }() - select { - case <-donec: - case <-time.After(5 * time.Second): - t.Fatal("timeout. 
deadlock?") - } -} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index 242d5623..0689bb6a 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -61,7 +61,7 @@ func (p *PerHost) dialerForRequest(host string) Dialer { return p.bypass } if host == zone[1:] { - // For a zone "example.com", we match "example.com" + // For a zone ".example.com", we match "example.com" // too. return p.bypass } diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go deleted file mode 100644 index a7d80957..00000000 --- a/vendor/golang.org/x/net/proxy/per_host_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "errors" - "net" - "reflect" - "testing" -) - -type recordingProxy struct { - addrs []string -} - -func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { - r.addrs = append(r.addrs, addr) - return nil, errors.New("recordingProxy") -} - -func TestPerHost(t *testing.T) { - var def, bypass recordingProxy - perHost := NewPerHost(&def, &bypass) - perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") - - expectedDef := []string{ - "example.com:123", - "1.2.3.4:123", - "[1001::]:123", - } - expectedBypass := []string{ - "localhost:123", - "zone:123", - "foo.zone:123", - "127.0.0.1:123", - "10.1.2.3:123", - "[1000::]:123", - } - - for _, addr := range expectedDef { - perHost.Dial("tcp", addr) - } - for _, addr := range expectedBypass { - perHost.Dial("tcp", addr) - } - - if !reflect.DeepEqual(expectedDef, def.addrs) { - t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) - } - if !reflect.DeepEqual(expectedBypass, bypass.addrs) { - t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) - } -} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go deleted file mode 100644 index 0f31e211..00000000 --- a/vendor/golang.org/x/net/proxy/proxy_test.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proxy - -import ( - "bytes" - "fmt" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" - "testing" -) - -type proxyFromEnvTest struct { - allProxyEnv string - noProxyEnv string - wantTypeOf Dialer -} - -func (t proxyFromEnvTest) String() string { - var buf bytes.Buffer - space := func() { - if buf.Len() > 0 { - buf.WriteByte(' ') - } - } - if t.allProxyEnv != "" { - fmt.Fprintf(&buf, "all_proxy=%q", t.allProxyEnv) - } - if t.noProxyEnv != "" { - space() - fmt.Fprintf(&buf, "no_proxy=%q", t.noProxyEnv) - } - return strings.TrimSpace(buf.String()) -} - -func TestFromEnvironment(t *testing.T) { - ResetProxyEnv() - - type dummyDialer struct { - direct - } - - RegisterDialerType("irc", func(_ *url.URL, _ Dialer) (Dialer, error) { - return dummyDialer{}, nil - }) - - proxyFromEnvTests := []proxyFromEnvTest{ - {allProxyEnv: "127.0.0.1:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, - {allProxyEnv: "ftp://example.com:8000", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, - {allProxyEnv: "socks5://example.com:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: &PerHost{}}, - {allProxyEnv: "irc://example.com:8000", wantTypeOf: dummyDialer{}}, - {noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, - {wantTypeOf: direct{}}, - } - - for _, tt := range proxyFromEnvTests { - os.Setenv("ALL_PROXY", tt.allProxyEnv) - os.Setenv("NO_PROXY", tt.noProxyEnv) - ResetCachedEnvironment() - - d := FromEnvironment() - if got, want := fmt.Sprintf("%T", d), fmt.Sprintf("%T", tt.wantTypeOf); got != want { - t.Errorf("%v: got type = %T, want %T", tt, d, tt.wantTypeOf) - } - } -} - -func TestFromURL(t *testing.T) { - endSystem, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("net.Listen failed: %v", err) - } - defer endSystem.Close() - gateway, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("net.Listen failed: %v", err) - } - defer gateway.Close() - - var wg sync.WaitGroup - wg.Add(1) - go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg) - - url, err := url.Parse("socks5://user:password@" + gateway.Addr().String()) - if err != nil { - t.Fatalf("url.Parse failed: %v", err) - } - proxy, err := FromURL(url, Direct) - if err != nil { - t.Fatalf("FromURL failed: %v", err) - } - _, port, err := net.SplitHostPort(endSystem.Addr().String()) - if err != nil { - t.Fatalf("net.SplitHostPort failed: %v", err) - } - if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil { - t.Fatalf("FromURL.Dial failed: %v", err) - } else { - c.Close() - } - - wg.Wait() -} - -func TestSOCKS5(t *testing.T) { - endSystem, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("net.Listen failed: %v", err) - } - defer endSystem.Close() - gateway, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("net.Listen failed: %v", err) - } - defer gateway.Close() - - var wg sync.WaitGroup - wg.Add(1) - go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg) - - proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct) - if err != nil { - t.Fatalf("SOCKS5 failed: %v", err) - } - if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil { - t.Fatalf("SOCKS5.Dial failed: %v", err) - } else { - c.Close() - } - - wg.Wait() -} - -func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) { - defer wg.Done() - - c, err := gateway.Accept() - if err != nil { - t.Errorf("net.Listener.Accept failed: %v", err) - return - } - defer c.Close() - - b := make([]byte, 32) 
- var n int - if typ == socks5Domain { - n = 4 - } else { - n = 3 - } - if _, err := io.ReadFull(c, b[:n]); err != nil { - t.Errorf("io.ReadFull failed: %v", err) - return - } - if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil { - t.Errorf("net.Conn.Write failed: %v", err) - return - } - if typ == socks5Domain { - n = 16 - } else { - n = 10 - } - if _, err := io.ReadFull(c, b[:n]); err != nil { - t.Errorf("io.ReadFull failed: %v", err) - return - } - if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ { - t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3]) - return - } - if typ == socks5Domain { - copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9}) - b = append(b, []byte("localhost")...) - } else { - copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4}) - } - host, port, err := net.SplitHostPort(endSystem.Addr().String()) - if err != nil { - t.Errorf("net.SplitHostPort failed: %v", err) - return - } - b = append(b, []byte(net.ParseIP(host).To4())...) - p, err := strconv.Atoi(port) - if err != nil { - t.Errorf("strconv.Atoi failed: %v", err) - return - } - b = append(b, []byte{byte(p >> 8), byte(p)}...) - if _, err := c.Write(b); err != nil { - t.Errorf("net.Conn.Write failed: %v", err) - return - } -} - -func ResetProxyEnv() { - for _, env := range []*envOnce{allProxyEnv, noProxyEnv} { - for _, v := range env.names { - os.Setenv(v, "") - } - } - ResetCachedEnvironment() -} - -func ResetCachedEnvironment() { - allProxyEnv.reset() - noProxyEnv.reset() -} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go index 2efec6e8..56345ec8 100644 --- a/vendor/golang.org/x/net/proxy/socks5.go +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -5,209 +5,32 @@ package proxy import ( - "errors" - "io" + "context" "net" - "strconv" -) - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928. -func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { - s := &socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} -type socks5 struct { - user, password string - network, addr string - forward Dialer -} - -const socks5Version = 5 - -const ( - socks5AuthNone = 0 - socks5AuthPassword = 2 + "golang.org/x/net/internal/socks" ) -const socks5Connect = 1 - -const ( - socks5IP4 = 1 - socks5Domain = 3 - socks5IP6 = 4 -) - -var socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the network net via the SOCKS5 proxy. -func (s *socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. 
-func (s *socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - if buf[1] == socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + d.ProxyDial = func(_ context.Context, network string, address string) (net.Conn, error) { + return forward.Dial(network, address) } } - - buf = buf[:0] - buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, socks5IP4) - ip = ip4 - } else { - buf = append(buf, socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, } - buf = append(buf, socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) 
- } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(socks5Errors) { - failure = socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case socks5IP4: - bytesToDiscard = net.IPv4len - case socks5IP6: - bytesToDiscard = net.IPv6len - case socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + d.Authenticate = up.Authenticate } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil + return d, nil } diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go deleted file mode 100644 index a2d49952..00000000 --- a/vendor/golang.org/x/net/publicsuffix/gen.go +++ /dev/null @@ -1,713 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go based on the authoritative -// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat -// -// The version is derived from -// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat -// and a human-readable form is at -// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat -// -// To fetch a particular git revision, such as 5c70ccd250, pass -// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" -// and -version "an explicit version string". - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "sort" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // These sum of these four values must be no greater than 32. - nodesBitsChildren = 9 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - // These sum of these four values must be no greater than 32. 
- childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -var ( - maxChildren int - maxTextOffset int - maxTextLength int - maxHi uint32 - maxLo uint32 -) - -func max(a, b int) int { - if a < b { - return b - } - return a -} - -func u32max(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 - numNodeType = 3 -) - -func nodeTypeStr(n int) string { - switch n { - case nodeTypeNormal: - return "+" - case nodeTypeException: - return "!" - case nodeTypeParentOnly: - return "o" - } - panic("unreachable") -} - -const ( - defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" - gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" -) - -var ( - labelEncoding = map[string]uint32{} - labelsList = []string{} - labelsMap = map[string]bool{} - rules = []string{} - - // validSuffixRE is used to check that the entries in the public suffix - // list are in canonical form (after Punycode encoding). Specifically, - // capital letters are not allowed. - validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - - shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) - dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) - - comments = flag.Bool("comments", false, "generate table.go comments, for debugging") - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") -) - -func main() { - if err := main1(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main1() error { - flag.Parse() - if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { - return fmt.Errorf("not enough bits to encode the nodes table") - } - if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { - return fmt.Errorf("not enough bits to encode the children table") - } - if *version == "" { - if *url != defaultURL { - return fmt.Errorf("-version was not specified, and the -url is not the default one") - } - sha, date, err := gitCommit() - if err != nil { - return err - } - *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) - } - var r io.Reader = os.Stdin - if *url != "" { - res, err := http.Get(*url) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) - } - r = res.Body - defer res.Body.Close() - } - - var root node - icann := false - br := bufio.NewReader(r) - for { - s, err := br.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - s = strings.TrimSpace(s) - if strings.Contains(s, "BEGIN ICANN DOMAINS") { - icann = true - continue - } - if strings.Contains(s, "END ICANN DOMAINS") { - icann = false - continue - } - if s == "" || strings.HasPrefix(s, "//") { - continue - } - s, err = idna.ToASCII(s) - if err != nil { - return err - } - if !validSuffixRE.MatchString(s) { - return fmt.Errorf("bad publicsuffix.org list data: %q", s) - } - - if *subset { - switch { - case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): - case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): - case s == "ao" || strings.HasSuffix(s, ".ao"): 
- case s == "ar" || strings.HasSuffix(s, ".ar"): - case s == "arpa" || strings.HasSuffix(s, ".arpa"): - case s == "cy" || strings.HasSuffix(s, ".cy"): - case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): - case s == "jp": - case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): - case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): - case s == "om" || strings.HasSuffix(s, ".om"): - case s == "uk" || strings.HasSuffix(s, ".uk"): - case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): - case s == "tw" || strings.HasSuffix(s, ".tw"): - case s == "zw" || strings.HasSuffix(s, ".zw"): - case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): - // xn--p1ai is Russian-Cyrillic "рф". - default: - continue - } - } - - rules = append(rules, s) - - nt, wildcard := nodeTypeNormal, false - switch { - case strings.HasPrefix(s, "*."): - s, nt = s[2:], nodeTypeParentOnly - wildcard = true - case strings.HasPrefix(s, "!"): - s, nt = s[1:], nodeTypeException - } - labels := strings.Split(s, ".") - for n, i := &root, len(labels)-1; i >= 0; i-- { - label := labels[i] - n = n.child(label) - if i == 0 { - if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { - n.nodeType = nt - } - n.icann = n.icann && icann - n.wildcard = n.wildcard || wildcard - } - labelsMap[label] = true - } - } - labelsList = make([]string, 0, len(labelsMap)) - for label := range labelsMap { - labelsList = append(labelsList, label) - } - sort.Strings(labelsList) - - if err := generate(printReal, &root, "table.go"); err != nil { - return err - } - if err := generate(printTest, &root, "table_test.go"); err != nil { - return err - } - return nil -} - -func generate(p func(io.Writer, *node) error, root *node, filename string) error { - buf := new(bytes.Buffer) - if err := p(buf, root); err != nil { - return err - } - b, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - return ioutil.WriteFile(filename, b, 0644) -} - -func gitCommit() (sha, date string, retErr error) { - res, err := http.Get(gitCommitURL) - if err != nil { - return "", "", err - } - if res.StatusCode != http.StatusOK { - return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) - } - defer res.Body.Close() - b, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if m := shaRE.FindSubmatch(b); m != nil { - sha = string(m[1]) - } - if m := dateRE.FindSubmatch(b); m != nil { - date = string(m[1]) - } - if sha == "" || date == "" { - retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) - } - return sha, date, retErr -} - -func printTest(w io.Writer, n *node) error { - fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") - for _, rule := range rules { - fmt.Fprintf(w, "%q,\n", rule) - } - fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") - if err := n.walk(w, printNodeLabel); err != nil { - return err - } - fmt.Fprintf(w, "}\n") - return nil -} - -func printReal(w io.Writer, n *node) error { - const header = `// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = %q - -const ( - nodesBitsChildren = %d - nodesBitsICANN = %d - nodesBitsTextOffset = %d - nodesBitsTextLength = %d - - childrenBitsWildcard = %d - childrenBitsNodeType = %d - childrenBitsHi = %d - childrenBitsLo = %d -) - -const ( - nodeTypeNormal = %d - nodeTypeException = %d - nodeTypeParentOnly = %d -) - -// numTLD is the number of top level domains. 
-const numTLD = %d - -` - fmt.Fprintf(w, header, *version, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, - nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - - text := combineText(labelsList) - if text == "" { - return fmt.Errorf("internal error: makeText returned no text") - } - for _, label := range labelsList { - offset, length := strings.Index(text, label), len(label) - if offset < 0 { - return fmt.Errorf("internal error: could not find %q in text %q", label, text) - } - maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1< 64 { - n, plus = 64, " +" - } - fmt.Fprintf(w, "%q%s\n", text[:n], plus) - text = text[n:] - } - - if err := n.walk(w, assignIndexes); err != nil { - return err - } - - fmt.Fprintf(w, ` - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. -// -// If the table was generated with the -comments flag, there is a //-comment -// after each node's data. In it is the nodes-array indexes of the children, -// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] children index -// [%2d bits] ICANN bit -// [%2d bits] text index -// [%2d bits] text length -var nodes = [...]uint32{ -`, - 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) - if err := n.walk(w, printNode); err != nil { - return err - } - fmt.Fprintf(w, `} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] wildcard bit -// [%2d bits] node type -// [%2d bits] high nodes index (exclusive) of children -// [%2d bits] low nodes index (inclusive) of children -var children=[...]uint32{ -`, - 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) - for i, c := range childrenEncoding { - s := "---------------" - lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 - if *comments { - fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", - c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) - } else { - fmt.Fprintf(w, "0x%x,\n", c) - } - } - fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { - ss = ss[1:] - } - return ss -} - -// crush combines a list of strings, taking advantage of overlaps. It returns a -// single string that contains each input string as a substring. 
-func crush(ss []string) string { - maxLabelLen := 0 - for _, s := range ss { - if maxLabelLen < len(s) { - maxLabelLen = len(s) - } - } - - for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { - prefixes := makePrefixMap(ss, prefixLen) - for i, s := range ss { - if len(s) <= prefixLen { - continue - } - mergeLabel(ss, i, prefixLen, prefixes) - } - } - - return strings.Join(ss, "") -} - -// mergeLabel merges the label at ss[i] with the first available matching label -// in prefixMap, where the last "prefixLen" characters in ss[i] match the first -// "prefixLen" characters in the matching label. -// It will merge ss[i] repeatedly until no more matches are available. -// All matching labels merged into ss[i] are replaced by "". -func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { - s := ss[i] - suffix := s[len(s)-prefixLen:] - for _, j := range prefixes[suffix] { - // Empty strings mean "already used." Also avoid merging with self. - if ss[j] == "" || i == j { - continue - } - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", - prefixLen, i, j, ss[i], ss[j], suffix) - } - ss[i] += ss[j][prefixLen:] - ss[j] = "" - // ss[i] has a new suffix, so merge again if possible. - // Note: we only have to merge again at the same prefix length. Shorter - // prefix lengths will be handled in the next iteration of crush's for loop. - // Can there be matches for longer prefix lengths, introduced by the merge? - // I believe that any such matches would by necessity have been eliminated - // during substring removal or merged at a higher prefix length. For - // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" - // would yield "abcde", which could be merged with "bcdef." However, in - // practice "cde" would already have been elimintated by removeSubstrings. - mergeLabel(ss, i, prefixLen, prefixes) - return - } -} - -// prefixMap maps from a prefix to a list of strings containing that prefix. The -// list of strings is represented as indexes into a slice of strings stored -// elsewhere. -type prefixMap map[string][]int - -// makePrefixMap constructs a prefixMap from a slice of strings. -func makePrefixMap(ss []string, prefixLen int) prefixMap { - prefixes := make(prefixMap) - for i, s := range ss { - // We use < rather than <= because if a label matches on a prefix equal to - // its full length, that's actually a substring match handled by - // removeSubstrings. - if prefixLen < len(s) { - prefix := s[:prefixLen] - prefixes[prefix] = append(prefixes[prefix], i) - } - } - - return prefixes -} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go deleted file mode 100644 index 8bbf3bcd..00000000 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// Package publicsuffix provides a public suffix list based on data from -// http://publicsuffix.org/. A public suffix is one under which Internet users -// can directly register names. -package publicsuffix // import "golang.org/x/net/publicsuffix" - -// TODO: specify case sensitivity and leading/trailing dot behavior for -// func PublicSuffix and func EffectiveTLDPlusOne. 
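The package comment above names the two entry points that callers of this vendored package use, PublicSuffix and EffectiveTLDPlusOne, together with the List value declared just below, which plugs into net/http/cookiejar. A minimal usage sketch follows (illustrative only, not part of the deleted file), assuming golang.org/x/net/publicsuffix remains resolvable through the new go.mod; the example domains and expected results are taken from the doc comment and the test cases that appear later in this diff.

package main

import (
	"fmt"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	// PublicSuffix returns the public suffix and whether it is ICANN-managed.
	// Per the doc comment above, dyndns.org is a privately managed suffix.
	suffix, icann := publicsuffix.PublicSuffix("foo.dyndns.org")
	fmt.Println(suffix, icann) // dyndns.org false

	// EffectiveTLDPlusOne returns the registrable domain (eTLD+1); the expected
	// value matches the eTLDPlusOneTestCases entry for www.test.ak.us.
	etld1, err := publicsuffix.EffectiveTLDPlusOne("www.test.ak.us")
	if err != nil {
		panic(err)
	}
	fmt.Println(etld1) // test.ak.us

	// List satisfies cookiejar.PublicSuffixList, as declared in this file.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		panic(err)
	}
	_ = jar
}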
- -import ( - "fmt" - "net/http/cookiejar" - "strings" -) - -// List implements the cookiejar.PublicSuffixList interface by calling the -// PublicSuffix function. -var List cookiejar.PublicSuffixList = list{} - -type list struct{} - -func (list) PublicSuffix(domain string) string { - ps, _ := PublicSuffix(domain) - return ps -} - -func (list) String() string { - return version -} - -// PublicSuffix returns the public suffix of the domain using a copy of the -// publicsuffix.org database compiled into the library. -// -// icann is whether the public suffix is managed by the Internet Corporation -// for Assigned Names and Numbers. If not, the public suffix is privately -// managed. For example, foo.org and foo.co.uk are ICANN domains, -// foo.dyndns.org and foo.blogspot.co.uk are private domains. -// -// Use cases for distinguishing ICANN domains like foo.com from private -// domains like foo.appspot.com can be found at -// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases -func PublicSuffix(domain string) (publicSuffix string, icann bool) { - lo, hi := uint32(0), uint32(numTLD) - s, suffix, wildcard := domain, len(domain), false -loop: - for { - dot := strings.LastIndex(s, ".") - if wildcard { - suffix = 1 + dot - } - if lo == hi { - break - } - f := find(s[1+dot:], lo, hi) - if f == notFound { - break - } - - u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) - icann = u&(1<>= nodesBitsICANN - u = children[u&(1<>= childrenBitsLo - hi = u & (1<>= childrenBitsHi - switch u & (1<>= childrenBitsNodeType - wildcard = u&(1<>= nodesBitsTextLength - offset := x & (1< len(b[j]) -} - -// eTLDPlusOneTestCases come from -// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt -var eTLDPlusOneTestCases = []struct { - domain, want string -}{ - // Empty input. - {"", ""}, - // Unlisted TLD. - {"example", ""}, - {"example.example", "example.example"}, - {"b.example.example", "example.example"}, - {"a.b.example.example", "example.example"}, - // TLD with only 1 rule. - {"biz", ""}, - {"domain.biz", "domain.biz"}, - {"b.domain.biz", "domain.biz"}, - {"a.b.domain.biz", "domain.biz"}, - // TLD with some 2-level rules. - {"com", ""}, - {"example.com", "example.com"}, - {"b.example.com", "example.com"}, - {"a.b.example.com", "example.com"}, - {"uk.com", ""}, - {"example.uk.com", "example.uk.com"}, - {"b.example.uk.com", "example.uk.com"}, - {"a.b.example.uk.com", "example.uk.com"}, - {"test.ac", "test.ac"}, - // TLD with only 1 (wildcard) rule. - {"mm", ""}, - {"c.mm", ""}, - {"b.c.mm", "b.c.mm"}, - {"a.b.c.mm", "b.c.mm"}, - // More complex TLD. - {"jp", ""}, - {"test.jp", "test.jp"}, - {"www.test.jp", "test.jp"}, - {"ac.jp", ""}, - {"test.ac.jp", "test.ac.jp"}, - {"www.test.ac.jp", "test.ac.jp"}, - {"kyoto.jp", ""}, - {"test.kyoto.jp", "test.kyoto.jp"}, - {"ide.kyoto.jp", ""}, - {"b.ide.kyoto.jp", "b.ide.kyoto.jp"}, - {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"}, - {"c.kobe.jp", ""}, - {"b.c.kobe.jp", "b.c.kobe.jp"}, - {"a.b.c.kobe.jp", "b.c.kobe.jp"}, - {"city.kobe.jp", "city.kobe.jp"}, - {"www.city.kobe.jp", "city.kobe.jp"}, - // TLD with a wildcard rule and exceptions. - {"ck", ""}, - {"test.ck", ""}, - {"b.test.ck", "b.test.ck"}, - {"a.b.test.ck", "b.test.ck"}, - {"www.ck", "www.ck"}, - {"www.www.ck", "www.ck"}, - // US K12. 
- {"us", ""}, - {"test.us", "test.us"}, - {"www.test.us", "test.us"}, - {"ak.us", ""}, - {"test.ak.us", "test.ak.us"}, - {"www.test.ak.us", "test.ak.us"}, - {"k12.ak.us", ""}, - {"test.k12.ak.us", "test.k12.ak.us"}, - {"www.test.k12.ak.us", "test.k12.ak.us"}, - // Punycoded IDN labels - {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, - {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, - {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, - {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, - {"xn--55qx5d.cn", ""}, - {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, - {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, - {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, - {"xn--fiqs8s", ""}, -} - -func TestEffectiveTLDPlusOne(t *testing.T) { - for _, tc := range eTLDPlusOneTestCases { - got, _ := EffectiveTLDPlusOne(tc.domain) - if got != tc.want { - t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) - } - } -} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go deleted file mode 100644 index 4f6bc8de..00000000 --- a/vendor/golang.org/x/net/publicsuffix/table.go +++ /dev/null @@ -1,9419 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = "publicsuffix.org's public_suffix_list.dat, git revision 38b238d6324042f2c2e6270459d1f4ccfe789fba (2017-08-28T20:09:01Z)" - -const ( - nodesBitsChildren = 9 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 -) - -// numTLD is the number of top level domains. -const numTLD = 1557 - -// Text is the combined text of all labels. 
-const text = "bifukagawalterbihorologyukuhashimoichinosekigaharaxastronomy-gat" + - "ewaybomloans3-ca-central-1bikedagestangeorgeorgiabilbaogakihokum" + - "akogengerdalces3-website-us-west-1billustrationikinuyamashinashi" + - "kitchenikkoebenhavnikolaevents3-website-us-west-2bioddabirdartce" + - "nterprisesakikugawarszawashingtondclkariyameldalindesnesakurainv" + - "estmentsakyotanabellunord-odalivornomutashinainzais-a-candidateb" + - "irkenesoddtangenovaraumalopolskanlandrayddnsfreebox-oslocus-3bir" + - "thplacebitballooningladefinimakanegasakindlegokasells-for-lessal" + - "angenikonantankarlsoyurihonjoyentattoolsztynsettlersalondonetska" + - "rmoyusuharabjarkoyusuisserveexchangebjerkreimbalsfjordgcahcesuol" + - "ocalhostrodawaraugustowadaegubalsanagochihayaakasakawaharanzanne" + - "frankfurtarumizusawabkhaziamallamagazineat-url-o-g-i-naturalhist" + - "orymuseumcentereviewskrakowebredirectmeteorappaleobihirosakikami" + - "jimabogadocscbgdyniabruzzoologicalvinklein-addrammenuernberggfar" + - "merseinebinagisochildrensgardenaturalsciencesnaturelles3-ap-nort" + - "heast-2ixboxenapponazure-mobileastcoastaldefenceatonsberg12000em" + - "mafanconagawakayamadridvagsoyericssonyoursidealerimo-i-ranaamesj" + - "evuemielno-ip6bjugninohekinannestadraydnsaltdalombardiamondsalva" + - "dordalibabalatinord-frontierblockbustermezjavald-aostaplesalzbur" + - "glassassinationalheritagematsubarakawagoebloombergbauerninomiyak" + - "onojosoyrorosamegawabloxcmsamnangerbluedancebmoattachmentsamsclu" + - "bindalombardynamisches-dnsamsungleezebmsandvikcoromantovalle-d-a" + - "ostathellebmwedeployuufcfanirasakis-a-catererbnpparibaselburgliw" + - "icebnrwegroweibolzanorddalomzaporizhzheguris-a-celticsfanishiaza" + - "is-a-chefarmsteadrivelandrobaknoluoktachikawalbrzycharternidrudu" + - "nsanfranciscofreakunedre-eikerbonnishigoppdalorenskoglobalashovh" + - "achinohedmarkarpaczeladzlglobodoes-itvedestrandupontariobookingl" + - "ogoweirboomladbrokesangobootsanjournalismailillesandefjordurbana" + - "mexnetlifyis-a-conservativefsnillfjordurhamburgloppenzaogashimad" + - "achicagoboatsannanishiharaboschaefflerdalotenkawabostikaruizawab" + - "ostonakijinsekikogentingmbhartiffanyuzawabotanicalgardenishiizun" + - "azukis-a-cpadualstackspace-to-rentalstomakomaibarabotanicgardeni" + - "shikatakayamatta-varjjataxihuanishikatsuragit-repostfoldnavybota" + - "nybouncemerckmsdnipropetrovskjervoyagebounty-fullensakerryproper" + - "tiesannohelplfinancialotteboutiquebecngminakamichiharabozentsuji" + - "iebplacedekagaminordkappgafanpachigasakievennodesashibetsukumiya" + - "mazonawsaarlandyndns-at-workinggroupalmspringsakerbrandywinevall" + - "eybrasiliabresciabrindisibenikebristoloseyouripirangapartmentsan" + - "okarumaifarsundyndns-blogdnsantabarbarabritishcolumbialowiezachp" + - "omorskienishikawazukamitsuebroadcastlefrakkestadyndns-freeboxost" + - "rowwlkpmgmodenakatombetsumitakagiizebroadwaybroke-itgorybrokerbr" + - "onnoysundyndns-homednsantacruzsantafedjeffersonishimerabrotherme" + - "saverdeatnurembergmxfinitybrowsersafetymarketsanukis-a-cubicle-s" + - "lavellinotteroybrumunddalottokonamegatakasugais-a-democratjeldsu" + - "ndyndns-ipamperedchefashionishinomiyashironobrunelasticbeanstalk" + - "asaokaminoyamaxunusualpersonishinoomotegobrusselsaotomeloyalistj" + - "ordalshalsenishinoshimattelefonicarbonia-iglesias-carboniaiglesi" + - "ascarboniabruxellesapodlasiellaktyubinskiptveterinairealtorlandy" + - "ndns-mailouvrehabmerbryanskleppanamabrynewjerseybuskerudinewport" + - 
"lligatjmaxxxjaworznowtv-infoodnetworkshoppingrimstadyndns-office" + - "-on-the-webcambulancebuzenishiokoppegardyndns-picsapporobuzzpana" + - "sonicateringebugattipschlesischesardegnamsskoganeis-a-designerim" + - "arumorimachidabwfastlylbaltimore-og-romsdalillyokozehimejibigawa" + - "ukraanghkeymachinewhampshirebungoonord-aurdalpha-myqnapcloudacce" + - "sscambridgestonemurorangeiseiyoichippubetsubetsugaruhrhcloudns3-" + - "eu-central-1bzhitomirumalselvendrellowiczest-le-patronishitosash" + - "imizunaminamiashigaracompute-1computerhistoryofscience-fictionco" + - "msecuritytacticsaseboknowsitallvivano-frankivskasuyanagawacondos" + - "hichinohealth-carereformitakeharaconferenceconstructionconsulado" + - "esntexistanbullensvanguardyndns-workisboringrueconsultanthropolo" + - "gyconsultingvollcontactoyonocontemporaryarteducationalchikugodoh" + - "aruovatoyookannamifunecontractorskenconventureshinodearthdfcbank" + - "aszubycookingchannelsdvrdnsdojoetsuwanouchikujogaszczytnordreisa" + - "-geekatowicecoolkuszkolahppiacenzaganquannakadomarineustarhubsas" + - "katchewancooperaunitemp-dnsassaris-a-gurulsandoycopenhagencyclop" + - "edichernihivanovodkagoshimalvikashibatakashimaseratis-a-financia" + - "ladvisor-aurdalucaniacorsicagliaridagawashtenawdev-myqnapcloudap" + - "plebtimnetzwhoswhokksundyndns1corvettenrightathomeftparliamentoy" + - "osatoyakokonoecosenzakopanerairguardiann-arboretumbriacosidnsfor" + - "-better-thanawatchesatxn--12c1fe0bradescorporationcostumedio-cam" + - "pidano-mediocampidanomediocouchpotatofriesaudacouncilcouponsauhe" + - "radynnsavannahgacoursesaves-the-whalessandria-trani-barletta-and" + - "riatranibarlettaandriacqhachiojiyahoooshikamaishimodatecranbrook" + - "uwanalyticsavonaplesaxocreditcardynulvikatsushikabeeldengeluidyn" + - "v6creditunioncremonashgabadaddjambylcrewiiheyakagecricketrzyncri" + - "meast-kazakhstanangercrotonexus-2crownprovidercrsvparmacruisesbs" + - "chokoladencryptonomichigangwoncuisinellair-traffic-controlleycul" + - "turalcentertainmentoyotaris-a-hard-workercuneocupcakecxn--12cfi8" + - "ixb8lcyberlevagangaviikanonjis-a-huntercymrussiacyonabarunzencyo" + - "utheworkpccwildlifedorainfracloudcontrolledogawarabikomaezakirun" + - "orfolkebibleikangerfidonnakaniikawatanagurafieldfiguerestauranto" + - "yotsukaidownloadfilateliafilegearfilminamiechizenfinalfinancefin" + - "eartscientistockholmestrandfinlandfinnoyfirebaseapparscjohnsonfi" + - "renzefirestonefirmdaleirvikatsuyamasfjordenfishingolffanscotland" + - "fitjarfitnessettlementoyourafjalerflesbergulenflickragerotikakeg" + - "awaflightscrapper-siteflirflogintogurafloraflorencefloridavvesii" + - "dazaifudaigojomedizinhistorischescrappingunmarburguovdageaidnusl" + - "ivinghistoryfloripaderbornfloristanohatakahamangyshlakasamatsudo" + - "ntexisteingeekaufenflorogerserveftpartis-a-landscaperflowerserve" + - "game-serversicherungushikamifuranortonflynnhostingxn--1ck2e1bamb" + - "leclercasadelamonedatingjerstadotsuruokakudamatsuemrflynnhubanan" + - "arepublicaseihichisobetsuitainairforcechirealmetlifeinsuranceu-1" + - "fndfor-ourfor-someethnologyfor-theaterforexrothachirogatakahatak" + - "aishimogosenforgotdnservehalflifestyleforli-cesena-forlicesenafo" + - "rlikescandynamic-dnservehttpartnerservehumourforsaleitungsenfors" + - "andasuolodingenfortmissoulancashireggio-calabriafortworthadanose" + - "gawaforuminamifuranofosneserveirchernovtsykkylvenetogakushimotog" + - "anewyorkshirecipesaro-urbino-pesarourbinopesaromasvuotnaharimamu" + - "rogawassamukawataricohdatsunanjoburgriwataraidyndns-remotewdyndn" + - 
"s-serverdaluccapitalonewspaperfotaruis-a-lawyerfoxfordebianfredr" + - "ikstadtvserveminecraftoystre-slidrettozawafreeddnsgeekgalaxyfree" + - "masonryfreesitexascolipicenogiftservemp3freetlservep2partservepi" + - "cservequakefreiburgfreightcminamiiselectozsdeloittevadsoccertifi" + - "cationfresenius-4fribourgfriuli-v-giuliafriuli-ve-giuliafriuli-v" + - "egiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafr" + - "iuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafri" + - "uliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesign" + - "frognfrolandfrom-akrehamnfrom-alfrom-arfrom-azfrom-capebretonami" + - "astalowa-wolayangroupartyfrom-coguchikuzenfrom-ctrani-andria-bar" + - "letta-trani-andriafrom-dchirurgiens-dentistes-en-francefrom-dedy" + - "n-ip24from-flanderservicesettsurgeonshalloffamemergencyachtsevas" + - "topolefrom-gausdalfrom-higashiagatsumagoizumizakirkenesevenassis" + - "icilyfrom-iafrom-idfrom-ilfrom-incheonfrom-ksewilliamhillfrom-ky" + - "owariasahikawafrom-lancasterfrom-maniwakuratextileksvikautokeino" + - "from-mdfrom-megurokunohealthcareersharis-a-liberalfrom-microsoft" + - "bankazofrom-mnfrom-modellingfrom-msharpasadenamsosnowiechiryukyu" + - "ragifuchungbukharafrom-mtnfrom-nchitachinakagawatchandclockashih" + - "arafrom-ndfrom-nefrom-nhktraniandriabarlettatraniandriafrom-njcb" + - "nlfrom-nminamiizukamishihoronobeauxartsandcraftshawaiijimarugame" + - "-hostrolekamikitayamatsuris-a-libertarianfrom-nvalled-aostatoilf" + - "rom-nyfrom-ohkurafrom-oketohmannorth-kazakhstanfrom-orfrom-padov" + - "aksdalfrom-pratohnoshooguyfrom-rivnefrom-schoenbrunnfrom-sdfrom-" + - "tnfrom-txn--1ctwolominamatakkokamiokamiminershellaspeziafrom-uta" + - "zuerichardlillehammerfeste-ipassagenshimojis-a-linux-useranishia" + - "ritabashijonawatefrom-val-daostavalleyfrom-vtranoyfrom-wafrom-wi" + - "elunnerfrom-wvalledaostavangerfrom-wyfrosinonefrostalbanshimokaw" + - "afroyahikobeardubaiduckdnshimokitayamafstavernfujiiderafujikawag" + - "uchikonefujiminohtawaramotoineppubolognakanotoddenfujinomiyadafu" + - "jiokayamansionshimonitayanagithubusercontentransportransurlfujis" + - "atoshonairtelecitychyattorneyagawakuyabukidsmynasushiobaragusart" + - "shimonosekikawafujisawafujishiroishidakabiratoridefenseljordfuji" + - "tsurugashimaritimekeepingfujixeroxn--1lqs03nfujiyoshidafukayabea" + - "tshimosuwalkis-a-llamarylandfukuchiyamadafukudominichitosetogits" + - "uldalucernefukuis-a-musicianfukumitsubishigakirovogradoyfukuokaz" + - "akiryuohadselfipassenger-associationfukuroishikarikaturindalfuku" + - "sakisarazurewebsiteshikagamiishibukawafukuyamagatakaharufunabash" + - "iriuchinadafunagatakahashimamakishiwadafunahashikamiamakusatsuma" + - "sendaisennangonohejis-a-nascarfanfundaciofuoiskujukuriyamanxn--1" + - "lqs71dfuosskoczowinbarcelonagasakikonaikawachinaganoharamcoacham" + - "pionshiphoptobishimaizurugbydgoszczecinemakeupowiathletajimabari" + - "akembuchikumagayagawakkanaibetsubamericanfamilydscloudcontrolapp" + - "spotagerfurnitureggio-emilia-romagnakasatsunairtrafficplexus-1fu" + - "rubiraquarellebesbyenglandfurudonostiaarpaviancarrierfurukawais-" + - "a-nurservebbshimotsukefusodegaurafussagamiharafutabayamaguchinom" + - "igawafutboldlygoingnowhere-for-moregontrailroadfuttsurugimperiaf" + - "uturecmshimotsumafuturehostingfuturemailingfvgfylkesbiblackfrida" + - "yfyresdalhangglidinghangoutsystemscloudfunctionshinichinanhannan" + - "mokuizumodernhannotaireshinjournalisteinkjerusalembroideryhanyuz" + - "enhapmirhareidsbergenharstadharvestcelebrationhasamarcheapgfoggi" + - 
"ahasaminami-alpssells-itrapaniimimatakatoris-a-playerhashbanghas" + - "udahasura-appharmacienshinjukumanohasvikazunohatogayaitakamoriok" + - "aluganskolevangerhatoyamazakitahiroshimarnardalhatsukaichikaisei" + - "s-a-republicancerresearchaeologicaliforniahattfjelldalhayashimam" + - "otobungotakadapliernewmexicodyn-vpnplusterhazuminobusellsyourhom" + - "egoodshinkamigotoyohashimotoshimahboehringerikehelsinkitakamiizu" + - "misanofidelityhembygdsforbundhemneshinshinotsurgeryhemsedalhepfo" + - "rgeherokussldheroyhgtvallee-aosteroyhigashichichibunkyonanaoshim" + - "ageandsoundandvisionhigashihiroshimanehigashiizumozakitakatakana" + - "beautysfjordhigashikagawahigashikagurasoedahigashikawakitaaikita" + - "kyushuaiahigashikurumeiwamarriottravelchannelhigashimatsushimars" + - "hallstatebankddielddanuorrikuzentakataiwanairlinebraskaunjargals" + - "aceohigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycle" + - "shinshirohigashinarusembokukitamidoris-a-rockstarachowicehigashi" + - "nehigashiomihachimanchesterhigashiosakasayamanakakogawahigashish" + - "irakawamatakanezawahigashisumiyoshikawaminamiaikitamotosumy-rout" + - "erhigashitsunotogawahigashiurausukitanakagusukumoduminamiminowah" + - "igashiyamatokoriyamanashifteditchyouripharmacyshintokushimahigas" + - "hiyodogawahigashiyoshinogaris-a-socialistmein-vigorgehiraizumisa" + - "tohobby-sitehirakatashinagawahiranais-a-soxfanhirarahiratsukagaw" + - "ahirayaizuwakamatsubushikusakadogawahistorichouseshintomikasahar" + - "ahitachiomiyagildeskaliszhitachiotagooglecodespotravelersinsuran" + - "cehitraeumtgeradellogliastradinghjartdalhjelmelandholeckobierzyc" + - "eholidayhomeiphdhomelinkfhappouhomelinuxn--1qqw23ahomeofficehome" + - "securitymaceratakaokamakurazakitashiobarahomesecuritypchloehomes" + - "enseminehomeunixn--2m4a15ehondahoneywellbeingzonehongopocznorthw" + - "esternmutualhonjyoitakarazukameokameyamatotakadahornindalhorseou" + - "lminamiogunicomcastresistancehortendofinternet-dnshinyoshitomiok" + - "amogawahospitalhoteleshiojirishirifujiedahotmailhoyangerhoylande" + - "troitskydivinghumanitieshioyanaizuhurdalhurumajis-a-studentalhyl" + - "lestadhyogoris-a-teacherkassymantechnologyhyugawarahyundaiwafune" + - "hzchocolatemasekashiwarajewishartgalleryjfkharkovalleeaosteigenj" + - "gorajlcube-serverrankoshigayakumoldelmenhorstagejlljmphilipsynol" + - "ogy-diskstationjnjcphilatelyjoyokaichibahccavuotnagareyamalborkd" + - "alwaysdatabaseballangenoamishirasatochigiessensiositelemarkherso" + - "njpmorganjpnjprshiraokananporovigotpantheonsitejuniperjurkoshuna" + - "ntokigawakosugekotohiradomainshiratakahagitlaborkotourakouhokuta" + - "makis-an-artistcgrouphiladelphiaareadmyblogsitekounosupplieshish" + - "ikuis-an-engineeringkouyamashikokuchuokouzushimasoykozagawakozak" + - "is-an-entertainerkozowindmillkpnkppspdnshisognekrasnodarkredston" + - "ekristiansandcatshisuifuelblagdenesnaaseralingenkainanaejrietisa" + - "latinabenonichoshibuyachiyodavvenjargaulardalutskasukabedzin-the" + - "-bandaioiraseeklogest-mon-blogueurovisionisshingugekristiansundk" + - "rodsheradkrokstadelvaldaostarnbergkryminamisanrikubetsupportrent" + - "ino-alto-adigekumatorinokumejimasudakumenanyokkaichiropractichoy" + - "odobashichikashukujitawarakunisakis-bykunitachiarailwaykunitomig" + - "usukumamotoyamassa-carrara-massacarraramassabusinessebyklegalloc" + - "alhistoryggeelvinckhmelnytskyivanylvenicekunneppulawykunstsammlu" + - "ngkunstunddesignkuokgrouphoenixn--30rr7ykureggioemiliaromagnakay" + - "amatsumaebashikshacknetrentino-altoadigekurgankurobelaudiblebork" + - 
"angerkurogimilanokuroisoftwarendalenugkuromatsunais-certifieduca" + - "torahimeshimamateramochizukirakurotakikawasakis-foundationkushir" + - "ogawakustanais-gonekusupplykutchanelkutnokuzumakis-into-animelbo" + - "urnekvafjordkvalsundkvamlidlugolekafjordkvanangenkvinesdalkvinnh" + - "eradkviteseidskogkvitsoykwpspiegelkzmisugitokorozawamitourismola" + - "ngevagrarchaeologyeongbuknx-serveronakatsugawamitoyoakemiuramiya" + - "zumiyotamanomjondalenmlbfanmonstermonticellolmontrealestatefarme" + - "quipmentrentino-s-tirollagrigentomologyeonggiehtavuoatnagaivuotn" + - "agaokakyotambabia-goracleaningatlantabusebastopologyeongnamegawa" + - "keisenbahnmonza-brianzaporizhzhiamonza-e-della-brianzapposhitara" + - "mamonzabrianzaptokuyamatsusakahoginankokubunjis-leetnedalmonzaeb" + - "rianzaramonzaedellabrianzamoonscalezajskolobrzegersundmoparachut" + - "ingmordoviajessheiminamitanemoriyamatsushigemoriyoshimilitarymor" + - "monmouthagakhanamigawamoroyamatsuuramortgagemoscowindowshizukuis" + - "himofusaintlouis-a-bruinsfanmoseushistorymosjoenmoskeneshizuokan" + - "azawamosshoujis-lostre-toteneis-an-accountantshirahamatonbetsurn" + - "adalmosvikomaganemoteginowaniihamatamakawajimaoris-not-certified" + - "unetbankhakassiamoviemovistargardmtpchristiansburgrondarmtranbym" + - "uenstermuginozawaonsenmuikamisunagawamukochikushinonsenergymulho" + - "uservebeermunakatanemuncieszynmuosattemuphonefosshowamurmanskoma" + - "kiyosunndalmurotorcraftrentino-stirolmusashimurayamatsuzakis-sav" + - "edmusashinoharamuseetrentino-sud-tirolmuseumverenigingmusicargod" + - "addynaliascoli-picenogataijis-slickharkivgucciprianiigataishinom" + - "akinderoymutsuzawamy-vigorlicemy-wanggouvicenzamyactivedirectory" + - "myasustor-elvdalmycdn77-securecifedexhibitionmyddnskingmydissent" + - "rentino-sudtirolmydrobofagemydshowtimemorialmyeffectrentino-sued" + - "-tirolmyfirewallonieruchomoscienceandindustrynmyfritzmyftpaccess" + - "hriramsterdamnserverbaniamyfusionmyhome-serversaillesienarashino" + - "mykolaivaolbia-tempio-olbiatempioolbialystokkepnoduminamiuonumat" + - "sumotofukemymailermymediapchristmasakimobetsuliguriamyokohamamat" + - "sudamypephotographysiomypetsigdalmyphotoshibajddarchitecturealty" + - "dalipaymypsxn--32vp30hagebostadmysecuritycamerakermyshopblocksil" + - "komatsushimashikizunokunimihoboleslawiechonanbuilderschmidtre-ga" + - "uldalukowhalingroks-thisayamanobeokalmykiamytis-a-bloggermytulea" + - "piagetmyipictetrentino-suedtirolmyvnchromedicaltanissettairamywi" + - "reitrentinoa-adigepinkomforbarclays3-us-east-2pioneerpippupictur" + - "esimple-urlpiszpittsburghofauskedsmokorsetagayasells-for-usgarde" + - "npiwatepixolinopizzapkommunalforbundplanetariuminamiyamashirokaw" + - "anabelembetsukubanklabudhabikinokawabarthaebaruminamimakis-a-pai" + - "nteractivegarsheis-a-patsfanplantationplantslingplatformshangril" + - "anslupskommuneplaystationplazaplchryslerplumbingopmnpodzonepohlp" + - "oivronpokerpokrovskomonopolitiendapolkowicepoltavalle-aostarostw" + - "odzislawinnersnoasaitamatsukuris-uberleetrdpomorzeszowiosokaneya" + - "mazoepordenonepornporsangerporsanguidell-ogliastraderporsgrunnan" + - "poznanpraxis-a-bookkeeperugiaprdpreservationpresidioprgmrprimelh" + - "uscultureisenprincipeprivatizehealthinsuranceprochowiceproductio" + - "nsokndalprofbsbxn--12co0c3b4evalleaostaticschuleprogressivegasia" + - "promombetsurfbx-oschwarzgwangjuifminamidaitomangotsukisofukushim" + - "aparocherkasyno-dschweizpropertyprotectionprotonetrentinoaadigep" + - "rudentialpruszkowitdkomorotsukamisatokamachintaifun-dnsaliasdabu" + - 
"rprzeworskogptplusdecorativeartsolarssonpvtrentinoalto-adigepwch" + - "ungnamdalseidfjordyndns-weberlincolniyodogawapzqldqponqslgbtrent" + - "inoaltoadigequicksytesolognequipelementsolundbeckomvuxn--2scrj9c" + - "hoseiroumuenchenissandnessjoenissayokoshibahikariwanumatakazakis" + - "-a-greenissedaluroyqvchurchaseljeepsongdalenviknagatorodoystufft" + - "oread-booksnesomnaritakurashikis-very-badajozorastuttgartrentino" + - "sudtirolsusakis-very-evillagesusonosuzakaniepcesuzukanmakiwakuni" + - "gamidsundsuzukis-very-goodhandsonsvalbardunloppacificirclegnicaf" + - "ederationsveiosvelvikongsvingersvizzerasvn-reposooswedenswidnica" + - "rtierswiebodzindianapolis-a-anarchistoireggiocalabriaswiftcovers" + - "winoujscienceandhistoryswisshikis-very-nicesynology-dsopotrentin" + - "os-tirolturystykanoyaltakasakiwientuscanytushuissier-justicetuva" + - "lle-daostatic-accessorreisahayakawakamiichikawamisatotaltuxfamil" + - "ytwmailvbargainstitutelevisionaustdalimanowarudaustevollavangena" + - "turbruksgymnaturhistorisches3-eu-west-1venneslaskerrylogisticsor" + - "tlandvestfoldvestnesoruminanovestre-slidreamhostersouthcarolinaz" + - "awavestre-totennishiawakuravestvagoyvevelstadvibo-valentiavibova" + - "lentiavideovillaskimitsubatamicable-modemoneyvinnicartoonartdeco" + - "ffeedbackplaneapplinzis-very-sweetpeppervinnytsiavipsinaappilots" + - "irdalvirginiavirtualvirtueeldomeindianmarketingvirtuelvisakataki" + - "nouevistaprinternationalfirearmsouthwestfalenviterboltrevisohugh" + - "esor-odalvivoldavixn--3bst00mincommbankmpspbarclaycards3-sa-east" + - "-1vlaanderenvladikavkazimierz-dolnyvladimirvlogoipimientaketomis" + - "atolgavolkswagentsowavologdanskonskowolawavolvolkenkundenvolyngd" + - "alvossevangenvotevotingvotoyonakagyokutourspjelkavikongsbergwloc" + - "lawekonsulatrobeepilepsydneywmflabspreadbettingworldworse-thanda" + - "wowithgoogleapisa-hockeynutsiracusakakinokiawpdevcloudwritesthis" + - "blogsytewroclawithyoutubeneventoeidsvollwtcircustomerwtfbxoscien" + - "cecentersciencehistorywuozuwwwiwatsukiyonowruzhgorodeowzmiuwajim" + - "axn--42c2d9axn--45br5cylxn--45brj9citadeliveryxn--45q11citicatho" + - "licheltenham-radio-opencraftrainingripescaravantaaxn--4gbriminin" + - "gxn--4it168dxn--4it797kooris-an-actorxn--4pvxs4allxn--54b7fta0cc" + - "ivilaviationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilisati" + - "onxn--5rtq34kopervikhmelnitskiyamashikexn--5su34j936bgsgxn--5tzm" + - "5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civili" + - "zationxn--80adxhkspydebergxn--80ao21axn--80aqecdr1axn--80asehdba" + - "rreauctionaval-d-aosta-valleyolasiteu-2xn--80aswgxn--80audnedaln" + - "xn--8ltr62koryokamikawanehonbetsurutaharaxn--8pvr4uxn--8y0a063ax" + - "n--90a3academy-firewall-gatewayxn--90aeroportalaheadjudaicaaarbo" + - "rteaches-yogasawaracingroks-theatreexn--90aishobaraomoriguchihar" + - "ahkkeravjuedischesapeakebayernrtritonxn--90azhytomyrxn--9dbhblg6" + - "dietcimdbarrel-of-knowledgemologicallimitediscountysvardolls3-us" + - "-gov-west-1xn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aropor" + - "t-byandexn--3ds443gxn--asky-iraxn--aurskog-hland-jnbarrell-of-kn" + - "owledgeologyombondiscoveryomitanobninskarasjohkaminokawanishiaiz" + - "ubangeu-3utilitiesquare7xn--avery-yuasakegawaxn--b-5gaxn--b4w605" + - "ferdxn--bck1b9a5dre4civilwarmanagementjxn--0trq7p7nnxn--bdddj-mr" + - "abdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccav" + - "uotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsu" + - "rreyxn--bjddar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-gra" + - 
"ingerxn--bod-2naroyxn--brnny-wuaccident-investigation-aptiblease" + - "ating-organicbcn-north-1xn--brnnysund-m8accident-prevention-webh" + - "openairbusantiquest-a-la-maisondre-landebudapest-a-la-masionionj" + - "ukudoyamagentositelekommunikationthewifiat-band-campaniaxn--brum" + - "-voagatroandinosaurepbodynathomebuiltrentinosued-tirolxn--btsfjo" + - "rd-9zaxn--c1avgxn--c2br7gxn--c3s14minnesotaketakatsukis-into-car" + - "shiranukanagawaxn--cck2b3barsyonlinewhollandishakotanavigationav" + - "oibmdisrechtranakaiwamizawaweddingjesdalimoliserniaustinnatuurwe" + - "tenschappenaumburgjerdrumckinseyokosukanzakiyokawaragrocerybnika" + - "hokutobamaintenancebetsuikicks-assedic66xn--cg4bkis-with-theband" + - "ovre-eikerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumintelligenc" + - "exn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr" + - "694bashkiriaustraliaisondriodejaneirochesterxn--czrs0trogstadxn-" + - "-czru2dxn--czrw28basilicataniaustrheimatunduhrennesoyokotebinore" + - "-og-uvdalaziobiraskvolloabathsbcasacamdvrcampobassociatestingjem" + - "nes3-ap-southeast-1xn--d1acj3basketballyngenavuotnaklodzkodairau" + - "thordalandroiddnss3-eu-west-2xn--d1alfaromeoxn--d1atromsaitomobe" + - "llevuelosangelesjaguarmeniaxn--d5qv7z876claimsardiniaxn--davvenj" + - "rga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluw" + - "erxn--drbak-wuaxn--dyry-iraxn--e1a4clanbibaidarq-axn--eckvdtc9dx" + - "n--efvn9srlxn--efvy88haibarakisosakitagawaxn--ehqz56nxn--elqq16h" + - "air-surveillancexn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct42" + - "9kosakaerodromegallupinbarefootballfinanzgoraurskog-holandroverh" + - "alla-speziaetnagahamaroygardenebakkeshibechambagriculturennebude" + - "jjudygarlandd-dnshome-webservercellikes-piedmontblancomeeres3-ap" + - "-south-1kappchizippodhaleangaviikadenadexetereport3l3p0rtargets-" + - "itargivestbytomaritimobaravennagasuke12hpalace164lima-cityeatsel" + - "inogradultarnobrzegyptianativeamericanantiques3-ap-northeast-133" + - "7xn--fhbeiarnxn--finny-yuaxn--fiq228c5hsrtrentinostirolxn--fiq64" + - "batodayonagoyautomotivecoalvdalaskanittedallasalleasinglesurance" + - "rtmgretagajoboji234xn--fiqs8srvaporcloudxn--fiqz9storagexn--fjor" + - "d-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn" + - "--frde-grandrapidstordalxn--frna-woaraisaijotromsojampagefrontap" + - "piemontexn--frya-hraxn--fzc2c9e2cldmailuxembourgrongaxn--fzys8d6" + - "9uvgmailxn--g2xx48clickasumigaurawa-mazowszextraspacekitagatajir" + - "issagaeroclubmedecincinnationwidealstahaugesunderseaportsinfolld" + - "alabamagasakishimabarackmazerbaijan-mayendoftheinternetflixilove" + - "collegefantasyleaguernseyxn--gckr3f0fedorapeopleirfjordynvpncher" + - "nivtsiciliaxn--gecrj9clinichernigovernmentjometacentruminamiawaj" + - "ikis-a-doctorayxn--ggaviika-8ya47hakatanoshiroomuraxn--gildeskl-" + - "g0axn--givuotna-8yasakaiminatoyonezawaxn--gjvik-wuaxn--gk3at1exn" + - "--gls-elacaixaxn--gmq050isleofmandalxn--gmqw5axn--h-2failxn--h1a" + - "eghakodatexn--h2breg3evenestorepaircraftrentinosud-tirolxn--h2br" + - "j9c8cliniquenoharaxn--h3cuzk1digitalxn--hbmer-xqaxn--hcesuolo-7y" + - "a35batsfjordivtasvuodnakamagayahababyglandivttasvuotnakamurataji" + - "mibuildingjovikarasjokarasuyamarylhurstjohnayorovnoceanographics" + - "3-us-west-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4acctrus" + - "teexn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hx" + - "t814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn" + - "--indery-fyasugivingxn--io0a7issmarterthanyouxn--j1aefedoraproje" + - 
"ctoyotomiyazakis-a-knightpointtokaizukamikoaniikappugliaxn--j1am" + - "hakonexn--j6w193gxn--jlq61u9w7bauhausposts-and-telecommunication" + - "sncfdiyonaguniversityoriikarateu-4xn--jlster-byasuokanraxn--jrpe" + - "land-54axn--jvr189misakis-into-cartoonshiraois-a-techietis-a-the" + - "rapistoiaxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kf" + - "jord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--" + - "3e0b707exn--koluokta-7ya57hakubaghdadxn--kprw13dxn--kpry57dxn--k" + - "pu716fermodalenxn--kput3iwchofunatoriginsurecreationishiwakis-a-" + - "geekashiwazakiyosatokashikiyosemitexn--krager-gyatomitamamuraxn-" + - "-kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49j" + - "elenia-goraxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanumazu" + - "ryxn--kvnangen-k0axn--l-1fairwindstorfjordxn--l1accentureklambor" + - "ghiniizaxn--laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ld" + - "ingen-q1axn--leagaviika-52bbcasertaipeiheijiitatebayashiibahcavu" + - "otnagaraholtalenvironmentalconservationflfanfshostrowiecasinordl" + - "andnpalermomahachijorpelandrangedalindashorokanaieverbankaratsug" + - "inamikatagamiharuconnectashkentatamotors3-us-west-2xn--lesund-hu" + - "axn--lgbbat1ad8jeonnamerikawauexn--lgrd-poaclintonoshoesarluxury" + - "xn--lhppi-xqaxn--linds-pramericanartrvareserveblogspotrentinosue" + - "dtirolxn--lns-qlapyatigorskypexn--loabt-0qaxn--lrdal-sraxn--lren" + - "skog-54axn--lt-liaclothingdustkakamigaharaxn--lten-granexn--lury" + - "-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddestorjdevclo" + - "udfrontdoorxn--mgb9awbferraraxn--mgba3a3ejtrysiljanxn--mgba3a4f1" + - "6axn--mgba3a4franamizuholdingsmilelverumisasaguris-into-gamessin" + - "atsukigatakasagotembaixadaxn--mgba7c0bbn0axn--mgbaakc7dvferrarit" + - "togoldpoint2thisamitsukexn--mgbaam7a8hakuis-a-personaltrainerxn-" + - "-mgbab2bdxn--mgbai9a5eva00bbtatarantottoriiyamanouchikuhokuryuga" + - "sakitaurayasudautoscanadaejeonbukaragandasnesoddenmarkhangelskja" + - "kdnepropetrovskiervaapsteiermark12xn--mgbai9azgqp6jetztrentino-a" + - "-adigexn--mgbayh7gpagespeedmobilizeroxn--mgbb9fbpobanazawaxn--mg" + - "bbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgbe" + - "rp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskodjejuegosh" + - "ikiminokamoenairportland-4-salernoboribetsuckstpetersburgxn--mgb" + - "qly7c0a67fbcnsarpsborgrossetouchijiwadegreexn--mgbqly7cvafranzis" + - "kanerdpolicexn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bbvacations" + - "watch-and-clockerxn--mgbx4cd0abbottulanxessor-varangerxn--mix082" + - "ferreroticanonoichinomiyakexn--mix891fetsundyroyrvikinguitarscho" + - "larshipschoolxn--mjndalen-64axn--mk0axindustriesteamfamberkeleyx" + - "n--mk1bu44cntkmaxxn--11b4c3dyndns-wikinkobayashikaoirminamibosog" + - "ndaluzernxn--mkru45ixn--mlatvuopmi-s4axn--mli-tlaquilanciaxn--ml" + - "selv-iuaxn--moreke-juaxn--mori-qsakuhokkaidoomdnsiskinkyotobetsu" + - "midatlanticolognextdirectmparaglidingroundhandlingroznyxn--mosje" + - "n-eyawaraxn--mot-tlarvikoseis-an-actresshirakofuefukihaboromskog" + - "xn--mre-og-romsdal-qqbentleyoshiokaracoldwarmiamihamadaveroykeni" + - "waizumiotsukuibestadds3-external-1xn--msy-ula0hakusandiegoodyear" + - "xn--mtta-vrjjat-k7afamilycompanycolonialwilliamsburgrparisor-fro" + - "nxn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn-" + - "-3hcrj9cistrondheimmobilienxn--nit225kosherbrookegawaxn--nmesjev" + - "uemie-tcbalestrandabergamoarekexn--nnx388axn--nodessakuragawaxn-" + - "-nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-bya" + - 
"eservecounterstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattanoogan" + - "ordre-landxn--o3cw4haldenxn--o3cyx2axn--od0algxn--od0aq3beppubli" + - "shproxyzgorzeleccollectionhlfanhs3-website-ap-northeast-1xn--ogb" + - "pf8flekkefjordxn--oppegrd-ixaxn--ostery-fyawatahamaxn--osyro-wua" + - "xn--p1acfgujolsterxn--p1aixn--pbt977coloradoplateaudioxn--pgbs0d" + - "hlxn--porsgu-sta26fhvalerxn--pssu33lxn--pssy2uxn--q9jyb4columbus" + - "heyxn--qcka1pmcdonaldstreamuneuesolutionsomaxn--qqqt11misconfuse" + - "dxn--qxamusementunesorfoldxn--rady-iraxn--rdal-poaxn--rde-ulavag" + - "iskexn--rdy-0nabarixn--rennesy-v1axn--rhkkervju-01aflakstadaokag" + - "akibichuoxn--rholt-mragowoodsideltaitogliattirestudioxn--rhqv96g" + - "xn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--r" + - "land-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31halsaikitahatakama" + - "tsukawaxn--rovu88bernuorockartuzyukinfinitintuitateshinanomachim" + - "kentateyamavocatanzarowebspacebizenakanojohanamakinoharassnasaba" + - "erobatickets3-ap-southeast-2xn--rros-granvindafjordxn--rskog-uua" + - "xn--rst-0narutokyotangovtunkoninjamisonxn--rsta-francaiseharaxn-" + - "-rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithruheredumbrell" + - "ajollamericanexpressexyxn--s9brj9communitysnesarufutsunomiyawaka" + - "saikaitakoelnxn--sandnessjen-ogbizxn--sandy-yuaxn--seral-lraxn--" + - "ses554gxn--sgne-gratangenxn--skierv-utazaskoyabearalvahkijobserv" + - "erisignieznoipifonymishimatsunoxn--skjervy-v1axn--skjk-soaxn--sk" + - "nit-yqaxn--sknland-fxaxn--slat-5narviikamitondabayashiogamagoriz" + - "iaxn--slt-elabbvieeexn--smla-hraxn--smna-gratis-a-bulls-fanxn--s" + - "nase-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr" + - "-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeski" + - "dyn-o-saurlandes3-website-ap-southeast-1xn--srfold-byaxn--srreis" + - "a-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalse" + - "n-sqbestbuyshouses3-website-ap-southeast-2xn--stre-toten-zcbstud" + - "yndns-at-homedepotenzamamicrolightingxn--t60b56axn--tckweatherch" + - "annelxn--tiq49xqyjevnakershuscountryestateofdelawarezzoologyxn--" + - "tjme-hraxn--tn0agrinet-freakstuff-4-salexn--tnsberg-q1axn--tor13" + - "1oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr" + - "-vraxn--uc0atvarggatrentoyokawaxn--uc0ay4axn--uist22hammarfeasta" + - "fricapetownnews-stagingxn--uisz3gxn--unjrga-rtaobaokinawashirosa" + - "tochiokinoshimalatvuopmiasakuchinotsuchiurakawalesundxn--unup4yx" + - "n--uuwu58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--vermgensbe" + - "rater-ctbetainaboxfusejnynysadodgeometre-experts-comptables3-web" + - "site-eu-west-1xn--vermgensberatung-pwbieigersundray-dnsupdaterno" + - "pilawavoues3-fips-us-gov-west-1xn--vestvgy-ixa6oxn--vg-yiabcgxn-" + - "-vgan-qoaxn--vgsy-qoa0jewelryxn--vgu402comobilyxn--vhquvaroyxn--" + - "vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bi" + - "elawalmartatsunoceanographiquevje-og-hornnes3-website-sa-east-1x" + - "n--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1comparemarkerr" + - "yhotelsasayamaxn--wgbl6axn--xhq521biellaakesvuemieleccexn--xkc2a" + - "l3hye2axn--xkc2dl3a5ee0hamurakamigoris-a-photographerokuappfizer" + - "xn--y9a3aquariumissilewismillerxn--yer-znarvikoshimizumakis-an-a" + - "narchistoricalsocietyxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn-" + - "-3oq18vl8pn36axn--ystre-slidre-ujbieszczadygeyachimataikikuchiku" + - "seikarugamvikareliancexn--zbx025dxn--zf0ao64axn--zf0avxn--3pxu8k" + - "onyveloftrentino-aadigexn--zfr164bievatmallorcadaques3-website-u" + - 
"s-east-1xperiaxz" - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. -// -// If the table was generated with the -comments flag, there is a //-comment -// after each node's data. In it is the nodes-array indexes of the children, -// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. -// -// The layout within the uint32, from MSB to LSB, is: -// [ 1 bits] unused -// [ 9 bits] children index -// [ 1 bits] ICANN bit -// [15 bits] text index -// [ 6 bits] text length -var nodes = [...]uint32{ - 0x31fe83, - 0x28e944, - 0x2ed8c6, - 0x380743, - 0x380746, - 0x3a5306, - 0x3b5e43, - 0x30a7c4, - 0x20d0c7, - 0x2ed508, - 0x1a07102, - 0x31f1c7, - 0x368c09, - 0x2d68ca, - 0x2d68cb, - 0x238503, - 0x2dec46, - 0x23d6c5, - 0x1e07542, - 0x21cf84, - 0x266d03, - 0x346145, - 0x22035c2, - 0x20a643, - 0x271f944, - 0x342285, - 0x2a10042, - 0x38a48e, - 0x255083, - 0x3affc6, - 0x2e00142, - 0x2d4207, - 0x240d86, - 0x3204f02, - 0x22ee43, - 0x256204, - 0x32d106, - 0x25b788, - 0x2811c6, - 0x378fc4, - 0x3600242, - 0x33b8c9, - 0x212107, - 0x2e6046, - 0x341809, - 0x2a0048, - 0x33a904, - 0x2a0f46, - 0x21f886, - 0x3a02d42, - 0x3a014f, - 0x28c84e, - 0x21bfc4, - 0x382c85, - 0x30a6c5, - 0x2e2109, - 0x249089, - 0x33b1c7, - 0x23f8c6, - 0x20ae43, - 0x3e01d42, - 0x2e3203, - 0x225d0a, - 0x20cac3, - 0x242f85, - 0x28e142, - 0x28e149, - 0x4200bc2, - 0x209204, - 0x28ad46, - 0x2e5c05, - 0x361644, - 0x4a1a344, - 0x203ec3, - 0x218d04, - 0x4e00702, - 0x2f8e84, - 0x52f5f04, - 0x339bca, - 0x5600f82, - 0x28bc47, - 0x281548, - 0x6206502, - 0x31d0c7, - 0x2c6d44, - 0x2c6d47, - 0x393c45, - 0x35e887, - 0x33af86, - 0x271dc4, - 0x378385, - 0x28ea47, - 0x72001c2, - 0x224143, - 0x200c42, - 0x200c43, - 0x760b5c2, - 0x20f4c5, - 0x7a01d02, - 0x357844, - 0x27e405, - 0x21bf07, - 0x25aece, - 0x2bf044, - 0x23df04, - 0x211c43, - 0x28a4c9, - 0x30eacb, - 0x2ea6c8, - 0x3415c8, - 0x306208, - 0x2b7288, - 0x33a74a, - 0x35e787, - 0x321606, - 0x7e8f282, - 0x36a683, - 0x377683, - 0x37fd44, - 0x3b5e83, - 0x32c343, - 0x1727e02, - 0x8203302, - 0x283f45, - 0x29e006, - 0x2da184, - 0x388547, - 0x2fa686, - 0x389384, - 0x3aa107, - 0x223d43, - 0x86cd5c2, - 0x8a0d342, - 0x8e1e642, - 0x21e646, - 0x9200002, - 0x2501c5, - 0x329343, - 0x201684, - 0x2efb04, - 0x2efb05, - 0x203c43, - 0x979c783, - 0x9a092c2, - 0x291d85, - 0x291d8b, - 0x343c06, - 0x21270b, - 0x226544, - 0x213a49, - 0x2148c4, - 0x9e14b02, - 0x215943, - 0x216283, - 0x1616b42, - 0x275fc3, - 0x216b4a, - 0xa201102, - 0x21d205, - 0x29a88a, - 0x2e0544, - 0x201103, - 0x325384, - 0x21ae03, - 0x21ae04, - 0x21ae07, - 0x21b605, - 0x21d685, - 0x21dc46, - 0x21dfc6, - 0x21ea43, - 0x222688, - 0x206c03, - 0xa60c702, - 0x245848, - 0x23614b, - 0x228908, - 0x228e06, - 0x229dc7, - 0x22da48, - 0xb6024c2, - 0xba430c2, - 0x32da08, - 0x233347, - 0x2e7b45, - 0x2e7b48, - 0x2c3b08, - 0x2be483, - 0x232e04, - 0x37fd82, - 0xbe34382, - 0xc23e102, - 0xca37302, - 0x237303, - 0xce01382, - 0x30a783, - 0x300f44, - 0x20a043, - 0x322844, - 0x20d7cb, - 0x2322c3, - 0x2e6a46, - 0x245f44, - 0x2982ce, - 0x381245, - 0x3b00c8, - 0x263347, - 0x26334a, - 0x22e803, - 0x317a07, - 0x30ec85, - 0x23a384, - 0x272706, - 0x272707, - 0x330f44, - 0x301f87, - 0x25a184, - 0x25b204, - 0x25b206, - 0x25f704, - 0x36bdc6, - 
[… several thousand removed lines of machine-generated lookup-table constants (hex values) from an auto-generated source file deleted by this patch, presumably part of the vendored dependency cleanup; the individual values are omitted here for brevity …]
- 0x8ca48, - 0x48e34406, - 0x49834408, - 0x7b548, - 0x11f3d0, - 0x5998c, - 0x6b9c7, - 0x6c647, - 0x71387, - 0x77fc7, - 0x13c42, - 0x144ec7, - 0x11724c, - 0x43b87, - 0xac206, - 0xac7c9, - 0xae208, - 0x206c2, - 0x942, - 0xbee8b, - 0x1a3307, - 0x18009, - 0x164ec9, - 0x3ef48, - 0xb8042, - 0x134649, - 0xcc60a, - 0xd2689, - 0xdfdc9, - 0xe0b08, - 0xe1b87, - 0xe4489, - 0xe61c5, - 0xe67d0, - 0x191646, - 0x11205, - 0x31e8d, - 0x235c6, - 0xefd07, - 0xf4558, - 0x14f508, - 0xc74a, - 0xb282, - 0x5524d, - 0xa02, - 0x86286, - 0x95408, - 0x8f148, - 0x16fa49, - 0x586c8, - 0x6420e, - 0x126447, - 0x1051cd, - 0xfb445, - 0x144c48, - 0x19fc08, - 0x106046, - 0xc2, - 0x12cf06, - 0x4542, - 0x341, - 0x65a07, - 0xf6fc3, - 0x492f4dc4, - 0x4969c243, - 0x141, - 0x19d06, - 0x141, - 0x1, - 0x19d06, - 0xf6fc3, - 0x1402285, - 0x252044, - 0x238543, - 0x253384, - 0x231604, - 0x208e83, - 0x229e45, - 0x221f43, - 0x20c843, - 0x355685, - 0x202443, - 0x4aa38543, - 0x23cac3, - 0x323043, - 0x200041, - 0x28cac3, - 0x20f644, - 0x21bf84, - 0x208e83, - 0x201a03, - 0x215443, - 0x16fb88, - 0x207102, - 0x39c783, - 0x20f882, - 0x238543, - 0x23cac3, - 0x21b583, - 0x200942, - 0x231604, - 0x255783, - 0x28cac3, - 0x208e83, - 0x200e03, - 0x201a03, - 0x202443, - 0x16fb88, - 0x37fd82, - 0x18c1c7, - 0xf882, - 0x10a985, - 0x1480cc8, - 0x10c50e, - 0x4ba0ab02, - 0x31fec8, - 0x2bdd86, - 0x2ca186, - 0x2bd707, - 0x4be00b42, - 0x4c3ac548, - 0x21870a, - 0x26b448, - 0x200242, - 0x31fb49, - 0x3b1b87, - 0x21ec06, - 0x34e849, - 0x2e9b44, - 0x348646, - 0x2ca584, - 0x27f584, - 0x25f009, - 0x32d906, - 0x240ac5, - 0x297a85, - 0x3b9d87, - 0x2c76c7, - 0x2979c4, - 0x2bd946, - 0x307b85, - 0x30a3c5, - 0x3a2a05, - 0x339407, - 0x378a05, - 0x31ddc9, - 0x234fc5, - 0x32f404, - 0x3a81c7, - 0x341b0e, - 0x306bc9, - 0x338749, - 0x388d86, - 0x24a608, - 0x36ae4b, - 0x2b698c, - 0x33ea46, - 0x2e5ac7, - 0x212245, - 0x38d20a, - 0x2b7389, - 0x209b49, - 0x259f06, - 0x300945, - 0x2edac5, - 0x3570c9, - 0x3a2b8b, - 0x27e286, - 0x3471c6, - 0x20de04, - 0x2943c6, - 0x24e1c8, - 0x3c0846, - 0x215006, - 0x205fc8, - 0x2092c7, - 0x209909, - 0x211385, - 0x16fb88, - 0x21a704, - 0x2394c4, - 0x201105, - 0x3a6649, - 0x228f87, - 0x228f8b, - 0x22b3ca, - 0x230905, - 0x4c612842, - 0x342f07, - 0x4ca30c08, - 0x3578c7, - 0x2c3d45, - 0x209dca, - 0xf882, - 0x2be6cb, - 0x255e0a, - 0x22a146, - 0x216383, - 0x2a038d, - 0x3572cc, - 0x357a4d, - 0x250545, - 0x334fc5, - 0x20db47, - 0x36c689, - 0x218606, - 0x381ac5, - 0x2d2b88, - 0x2942c3, - 0x2fa288, - 0x2942c8, - 0x2cb287, - 0x314808, - 0x3b49c9, - 0x374847, - 0x342707, - 0x202108, - 0x2d1c84, - 0x2d1c87, - 0x26fdc8, - 0x355546, - 0x3b874f, - 0x226207, - 0x2eb686, - 0x2298c5, - 0x22a8c3, - 0x381947, - 0x37cc43, - 0x252886, - 0x254006, - 0x254706, - 0x298b85, - 0x26e2c3, - 0x397cc8, - 0x37f889, - 0x3920cb, - 0x254888, - 0x255985, - 0x2584c5, - 0x4cef6802, - 0x250089, - 0x34eec7, - 0x263c85, - 0x25ef07, - 0x260506, - 0x374345, - 0x263ecb, - 0x2662c4, - 0x26b005, - 0x26b147, - 0x27db86, - 0x27e045, - 0x289a47, - 0x28a187, - 0x2d5104, - 0x291b8a, - 0x292048, - 0x2cee09, - 0x2a0f05, - 0x3bf1c6, - 0x24e38a, - 0x2be906, - 0x26f2c7, - 0x2ceacd, - 0x2aa349, - 0x396fc5, - 0x339f07, - 0x333448, - 0x25a5c8, - 0x332847, - 0x358246, - 0x21cb87, - 0x253c43, - 0x34b1c4, - 0x371cc5, - 0x39d947, - 0x3a2409, - 0x231b08, - 0x34cbc5, - 0x23bac4, - 0x254a45, - 0x256c4d, - 0x2006c2, - 0x230386, - 0x2861c6, - 0x2e654a, - 0x3904c6, - 0x39ab85, - 0x342445, - 0x342447, - 0x3afd0c, - 0x27b3ca, - 0x294086, - 0x28ad05, - 0x294206, - 0x294547, - 0x296886, - 0x298a8c, - 0x34e989, - 0x4d21a187, - 
0x29b745, - 0x29b746, - 0x29bcc8, - 0x246f85, - 0x2ab085, - 0x2ab808, - 0x2aba0a, - 0x4d6335c2, - 0x4da14d02, - 0x2e76c5, - 0x2eb603, - 0x243408, - 0x252403, - 0x2abc84, - 0x25240b, - 0x36b208, - 0x2daa48, - 0x4df3b049, - 0x2afc09, - 0x2b0746, - 0x2b1c08, - 0x2b1e09, - 0x2b2886, - 0x2b2a05, - 0x3944c6, - 0x2b2f49, - 0x389347, - 0x2647c6, - 0x2de087, - 0x218487, - 0x2dd9c4, - 0x4e34f809, - 0x2d32c8, - 0x3ac448, - 0x3932c7, - 0x2cd346, - 0x36c489, - 0x2ca847, - 0x32598a, - 0x358388, - 0x208387, - 0x208f86, - 0x271d8a, - 0x26fbc8, - 0x2ed485, - 0x230685, - 0x2ef1c7, - 0x311cc9, - 0x30150b, - 0x31a308, - 0x235049, - 0x254c87, - 0x2bd04c, - 0x2bfccc, - 0x2bffca, - 0x2c024c, - 0x2ca108, - 0x2ca308, - 0x2ca504, - 0x2caa09, - 0x2cac49, - 0x2cae8a, - 0x2cb109, - 0x2cb447, - 0x3ba98c, - 0x23f586, - 0x2cbf88, - 0x2be9c6, - 0x387486, - 0x396ec7, - 0x306dc8, - 0x3445cb, - 0x28e307, - 0x250289, - 0x350b89, - 0x253507, - 0x2771c4, - 0x271c07, - 0x2fda46, - 0x21d8c6, - 0x33c345, - 0x297248, - 0x2993c4, - 0x2993c6, - 0x27b28b, - 0x21bac9, - 0x36c886, - 0x204bc9, - 0x339586, - 0x25f1c8, - 0x211b83, - 0x300ac5, - 0x219b09, - 0x21da05, - 0x2fba44, - 0x27d046, - 0x2fd385, - 0x299906, - 0x310ec7, - 0x33a986, - 0x3b134b, - 0x237587, - 0x241646, - 0x354786, - 0x3b9e46, - 0x297989, - 0x25384a, - 0x2bbb85, - 0x2202cd, - 0x2abb06, - 0x204a86, - 0x2f3f06, - 0x22dd45, - 0x2e6ac7, - 0x300087, - 0x2e7dce, - 0x28cac3, - 0x2cd309, - 0x210c89, - 0x38d607, - 0x364207, - 0x2a5bc5, - 0x2b57c5, - 0x4e63470f, - 0x2d5a47, - 0x2d5c08, - 0x2d6144, - 0x2d7106, - 0x4ea4ea42, - 0x2da786, - 0x20c0c6, - 0x210e4e, - 0x2fa0ca, - 0x273b06, - 0x23398a, - 0x211689, - 0x32b385, - 0x3a4808, - 0x3bca06, - 0x306748, - 0x33aac8, - 0x2194cb, - 0x2bd805, - 0x378a88, - 0x20610c, - 0x2c3c07, - 0x254246, - 0x2fd1c8, - 0x3488c8, - 0x4ee06802, - 0x23588b, - 0x2123c9, - 0x205549, - 0x2174c7, - 0x223408, - 0x4f36bec8, - 0x38ffcb, - 0x23edc9, - 0x338f0d, - 0x27fa88, - 0x22b1c8, - 0x4f6014c2, - 0x203cc4, - 0x4fa19302, - 0x2fe206, - 0x4fe004c2, - 0x261b8a, - 0x2199c6, - 0x232808, - 0x2c6f48, - 0x2b6f06, - 0x22fe46, - 0x2f9186, - 0x2b5a45, - 0x2443c4, - 0x50206d04, - 0x214106, - 0x29c747, - 0x50620c47, - 0x2d644b, - 0x341ec9, - 0x33500a, - 0x2106c4, - 0x342588, - 0x26458d, - 0x2f2489, - 0x2f26c8, - 0x2f2d49, - 0x2f4544, - 0x245884, - 0x285cc5, - 0x320fcb, - 0x36b186, - 0x34b905, - 0x2279c9, - 0x2bda08, - 0x210dc4, - 0x38d389, - 0x2064c5, - 0x2c7708, - 0x342dc7, - 0x338b48, - 0x286d86, - 0x233207, - 0x29a989, - 0x224a49, - 0x38adc5, - 0x34dfc5, - 0x50a08402, - 0x32f1c4, - 0x2fdd45, - 0x2ce506, - 0x33bd05, - 0x387e47, - 0x214205, - 0x27dbc4, - 0x388e46, - 0x381b47, - 0x23d046, - 0x2c41c5, - 0x207f48, - 0x2bdf85, - 0x211a07, - 0x214689, - 0x21bc0a, - 0x2fc487, - 0x2fc48c, - 0x240a86, - 0x37e349, - 0x246a45, - 0x246ec8, - 0x207c03, - 0x216d85, - 0x2fd705, - 0x282d47, - 0x50e06ac2, - 0x22f647, - 0x2e56c6, - 0x373b46, - 0x30bfc6, - 0x348806, - 0x206748, - 0x2a0d05, - 0x2eb747, - 0x2eb74d, - 0x227103, - 0x227105, - 0x379347, - 0x22f988, - 0x378f05, - 0x2216c8, - 0x37ccc6, - 0x335b87, - 0x2cbec5, - 0x2bd886, - 0x39ce45, - 0x21c70a, - 0x2f1346, - 0x383f47, - 0x2bca85, - 0x2f5047, - 0x2f97c4, - 0x2fb9c6, - 0x2fe345, - 0x32d70b, - 0x2fd8c9, - 0x24214a, - 0x38ae48, - 0x30e048, - 0x380a8c, - 0x3964c7, - 0x3054c8, - 0x307f48, - 0x3084c5, - 0x311a8a, - 0x31c449, - 0x51200d02, - 0x201886, - 0x216044, - 0x216049, - 0x27d549, - 0x27e9c7, - 0x2b4e07, - 0x2938c9, - 0x22df48, - 0x22df4f, - 0x2e3a06, - 0x2df14b, - 0x34b445, - 0x34b447, - 0x368849, - 0x21aa46, - 0x38d307, - 
0x2e1a45, - 0x23ae84, - 0x284fc6, - 0x2262c4, - 0x2db107, - 0x2d6f08, - 0x51700848, - 0x301245, - 0x301387, - 0x260a09, - 0x205304, - 0x24b348, - 0x51ab7cc8, - 0x283384, - 0x23c208, - 0x332d44, - 0x22be49, - 0x351a45, - 0x51e05082, - 0x2e3a45, - 0x310045, - 0x20fc48, - 0x23d747, - 0x52200d42, - 0x3322c5, - 0x2d8e46, - 0x27cb06, - 0x32f188, - 0x337d48, - 0x33bcc6, - 0x34bb06, - 0x38c289, - 0x373a86, - 0x21a90b, - 0x2e5f85, - 0x208a46, - 0x29e108, - 0x3a0a06, - 0x322c46, - 0x221b8a, - 0x23b30a, - 0x2498c5, - 0x2a0dc7, - 0x313646, - 0x52606442, - 0x379487, - 0x266cc5, - 0x24e304, - 0x24e305, - 0x2105c6, - 0x278fc7, - 0x215dc5, - 0x23b484, - 0x2c4788, - 0x322d05, - 0x3af347, - 0x3b6dc5, - 0x21c645, - 0x258f84, - 0x2ee209, - 0x3079c8, - 0x263146, - 0x2b5386, - 0x345186, - 0x52b08148, - 0x308347, - 0x30874d, - 0x3090cc, - 0x3096c9, - 0x309909, - 0x52f67742, - 0x3b6343, - 0x215ac3, - 0x2fdb05, - 0x39da4a, - 0x32f046, - 0x30e2c5, - 0x311084, - 0x31108b, - 0x323a8c, - 0x3244cc, - 0x3247d5, - 0x32660d, - 0x327d0f, - 0x3280d2, - 0x32854f, - 0x328912, - 0x328d93, - 0x32924d, - 0x32980d, - 0x329b8e, - 0x32a10e, - 0x32a94c, - 0x32ad0c, - 0x32b14b, - 0x32b4ce, - 0x32c612, - 0x32ee0c, - 0x32fd90, - 0x33cd52, - 0x33d9cc, - 0x33e08d, - 0x33e3cc, - 0x3406d1, - 0x34734d, - 0x349e0d, - 0x34a40a, - 0x34a68c, - 0x34af8c, - 0x34b60c, - 0x34c20c, - 0x3523d3, - 0x352cd0, - 0x3530d0, - 0x35398d, - 0x353f8c, - 0x354b89, - 0x35690d, - 0x356c53, - 0x3595d1, - 0x359a13, - 0x35a0cf, - 0x35a48c, - 0x35a78f, - 0x35ab4d, - 0x35b14f, - 0x35b510, - 0x35bf8e, - 0x35f88e, - 0x35fe10, - 0x36150d, - 0x361e8e, - 0x36220c, - 0x363213, - 0x3658ce, - 0x365f50, - 0x366351, - 0x36678f, - 0x366b53, - 0x3672cd, - 0x36760f, - 0x3679ce, - 0x368090, - 0x368489, - 0x369210, - 0x36980f, - 0x369e8f, - 0x36a252, - 0x36dcce, - 0x36e7cd, - 0x36f00d, - 0x36f34d, - 0x37078d, - 0x370acd, - 0x370e10, - 0x37120b, - 0x371a8c, - 0x371e0c, - 0x37240c, - 0x37270e, - 0x382350, - 0x384512, - 0x38498b, - 0x384e8e, - 0x38520e, - 0x386dce, - 0x38724b, - 0x53388016, - 0x38988d, - 0x38a014, - 0x38b04d, - 0x38cd55, - 0x38e30d, - 0x38ec8f, - 0x38f4cf, - 0x39238f, - 0x39274e, - 0x392ccd, - 0x394091, - 0x39668c, - 0x39698c, - 0x396c8b, - 0x39710c, - 0x3974cf, - 0x397892, - 0x39824d, - 0x39974c, - 0x399bcc, - 0x399ecd, - 0x39a20f, - 0x39a5ce, - 0x39d70c, - 0x39dccd, - 0x39e00b, - 0x39e9cc, - 0x39f2cd, - 0x39f60e, - 0x39f989, - 0x3a1353, - 0x3a188d, - 0x3a1bcd, - 0x3a21cc, - 0x3a264e, - 0x3a37cf, - 0x3a3b8c, - 0x3a3e8d, - 0x3a41cf, - 0x3a458c, - 0x3a508c, - 0x3a550c, - 0x3a580c, - 0x3a5ecd, - 0x3a6212, - 0x3a688c, - 0x3a6b8c, - 0x3a6e91, - 0x3a72cf, - 0x3a768f, - 0x3a7a53, - 0x3a8a0e, - 0x3a8d8f, - 0x3a914c, - 0x537a948e, - 0x3a980f, - 0x3a9bd6, - 0x3aaa92, - 0x3acf0c, - 0x3ada0f, - 0x3ae08d, - 0x3ae3cf, - 0x3ae78c, - 0x3aea8d, - 0x3aedcd, - 0x3b084e, - 0x3b228c, - 0x3b258c, - 0x3b2890, - 0x3b57d1, - 0x3b5c0b, - 0x3b5f4c, - 0x3b624e, - 0x3b7211, - 0x3b764e, - 0x3b79cd, - 0x3bc7cb, - 0x3bd88f, - 0x3be394, - 0x210642, - 0x210642, - 0x204d43, - 0x210642, - 0x204d43, - 0x210642, - 0x2009c2, - 0x394505, - 0x3b6f0c, - 0x210642, - 0x210642, - 0x2009c2, - 0x210642, - 0x29c345, - 0x21bc05, - 0x210642, - 0x210642, - 0x201102, - 0x29c345, - 0x326b49, - 0x3592cc, - 0x210642, - 0x210642, - 0x210642, - 0x210642, - 0x394505, - 0x210642, - 0x210642, - 0x210642, - 0x210642, - 0x201102, - 0x326b49, - 0x210642, - 0x210642, - 0x210642, - 0x21bc05, - 0x210642, - 0x21bc05, - 0x3592cc, - 0x3b6f0c, - 0x39c783, - 0x238543, - 0x23cac3, - 0x323043, - 0x231604, - 0x208e83, - 0x201a03, - 0xe008, 
- 0x64344, - 0xe03, - 0xc63c8, - 0x207102, - 0x5460f882, - 0x24ac83, - 0x23f044, - 0x2020c3, - 0x39e544, - 0x23a1c6, - 0x216f83, - 0x304704, - 0x2d7b05, - 0x28cac3, - 0x208e83, - 0x1a3443, - 0x201a03, - 0x243d0a, - 0x3821c6, - 0x38558c, - 0x16fb88, - 0x20f882, - 0x238543, - 0x23cac3, - 0x323043, - 0x229443, - 0x20c0c6, - 0x208e83, - 0x201a03, - 0x221483, - 0xac408, - 0x131645, - 0x35f09, - 0x35c2, - 0x55b95645, - 0x26547, - 0xba9c8, - 0x14b0e, - 0x90212, - 0x10a78b, - 0x1398c6, - 0x55edf485, - 0x562df48c, - 0x148f87, - 0x36dc7, - 0x15000a, - 0x46690, - 0x13b345, - 0xb610b, - 0xf8d08, - 0x3e607, - 0x3af8b, - 0x57f89, - 0x185a87, - 0x81a87, - 0x7e4c7, - 0x3e546, - 0xdd088, - 0x56824386, - 0xb084d, - 0x14f9d0, - 0x56c0c182, - 0x8ca48, - 0x4f450, - 0x15090c, - 0x5735cd4d, - 0x64a88, - 0x721c7, - 0x76f09, - 0x5d3c6, - 0x9bec8, - 0x351c2, - 0xa808a, - 0x293c7, - 0x43b87, - 0xac7c9, - 0xae208, - 0x8b205, - 0xd538e, - 0x5c4e, - 0x17a8f, - 0x18009, - 0x164ec9, - 0x15d38b, - 0x7ba8f, - 0xee40c, - 0xa88cb, - 0xc8b48, - 0xd6347, - 0xdbe88, - 0xfe78b, - 0xff34c, - 0x10038c, - 0x1037cc, - 0x10b54d, - 0x3ef48, - 0xd2942, - 0x134649, - 0x195d8b, - 0xcd546, - 0x11f30b, - 0xe118a, - 0xe1d45, - 0xe67d0, - 0xe9f06, - 0x16b986, - 0x11205, - 0x10fc48, - 0xefd07, - 0xeffc7, - 0x8d047, - 0xfe04a, - 0xba84a, - 0x86286, - 0x99d0d, - 0x8f148, - 0x586c8, - 0x58ec9, - 0xbc8c5, - 0x1ad70c, - 0x10b74b, - 0x19e604, - 0x105e09, - 0x106046, - 0x16546, - 0x2642, - 0x12cf06, - 0xc68b, - 0x112707, - 0x4542, - 0xd1305, - 0x2e604, - 0x8c1, - 0x52d03, - 0x56764886, - 0x9c243, - 0x7b02, - 0x293c4, - 0x242, - 0x86644, - 0xf82, - 0x6502, - 0x3302, - 0xd342, - 0x1382, - 0xdf482, - 0x8c2, - 0x22902, - 0x40e82, - 0x1a442, - 0x4c82, - 0x234c2, - 0x3cac3, - 0x6b82, - 0x1842, - 0x7602, - 0x6b02, - 0x17202, - 0x36d02, - 0x206c2, - 0xc442, - 0x1c82, - 0x942, - 0x55783, - 0x4182, - 0x2542, - 0xb8042, - 0x9a02, - 0x282, - 0x2942, - 0xd842, - 0xc202, - 0x4a82, - 0x182842, - 0x745c2, - 0xe82, - 0x8e83, - 0x1942, - 0x6802, - 0x982, - 0x5b82, - 0x18ad45, - 0x7082, - 0x2fa42, - 0x13ebc3, - 0x482, - 0xb282, - 0xa02, - 0x2502, - 0x6742, - 0xd42, - 0xc2, - 0x2642, - 0x35dc5, - 0x17f087, - 0x20d0c3, - 0x207102, - 0x238543, - 0x23cac3, - 0x21b583, - 0x2046c3, - 0x229443, - 0x208e83, - 0x200e03, - 0x201a03, - 0x29c283, - 0x10c3, - 0x16fb88, - 0x238543, - 0x23cac3, - 0x21b583, - 0x28cac3, - 0x208e83, - 0x200e03, - 0x1a3443, - 0x201a03, - 0x238543, - 0x23cac3, - 0x201a03, - 0x238543, - 0x23cac3, - 0x323043, - 0x200041, - 0x28cac3, - 0x208e83, - 0x21b543, - 0x201a03, - 0x146f44, - 0x39c783, - 0x238543, - 0x23cac3, - 0x26eac3, - 0x21b583, - 0x207b03, - 0x289303, - 0x219983, - 0x241503, - 0x323043, - 0x231604, - 0x208e83, - 0x201a03, - 0x202443, - 0x333cc4, - 0x251183, - 0x3ec3, - 0x3c0943, - 0x20a3c8, - 0x271dc4, - 0x2cf30a, - 0x2bed86, - 0x112384, - 0x3a7ec7, - 0x226cca, - 0x2e38c9, - 0x3b7f87, - 0x3be84a, - 0x39c783, - 0x2e774b, - 0x28b689, - 0x345285, - 0x2da5c7, - 0xf882, - 0x238543, - 0x21a447, - 0x2379c5, - 0x2ca689, - 0x23cac3, - 0x2bd606, - 0x2c9883, - 0xe5743, - 0x110646, - 0xd386, - 0x16f07, - 0x21af86, - 0x222985, - 0x3a3147, - 0x2de5c7, - 0x59b23043, - 0x33dc07, - 0x374703, - 0x3b5045, - 0x231604, - 0x231308, - 0x366fcc, - 0x2b4fc5, - 0x2aa4c6, - 0x21a307, - 0x39b687, - 0x23dfc7, - 0x23f108, - 0x30f50f, - 0x2e3b05, - 0x24ad87, - 0x33acc7, - 0x2abdca, - 0x2d29c9, - 0x39e6c5, - 0x31078a, - 0xc546, - 0x2c9905, - 0x3703c4, - 0x2c6e86, - 0x300e07, - 0x2d2847, - 0x306908, - 0x217645, - 0x2378c6, - 0x214f85, - 0x2e8105, - 0x21ba04, - 0x2b6e07, - 
0x20658a, - 0x34d908, - 0x367f06, - 0x29443, - 0x2e4505, - 0x26bf86, - 0x3babc6, - 0x211106, - 0x28cac3, - 0x3984c7, - 0x33ac45, - 0x208e83, - 0x2e144d, - 0x200e03, - 0x306a08, - 0x3b3644, - 0x310945, - 0x2abcc6, - 0x23f386, - 0x208947, - 0x2aed47, - 0x26f045, - 0x201a03, - 0x20a147, - 0x277089, - 0x36bbc9, - 0x227f4a, - 0x235d82, - 0x3b5004, - 0x2eb2c4, - 0x344487, - 0x22f508, - 0x2f0889, - 0x226fc9, - 0x2f1ac7, - 0x28bb46, - 0xf3006, - 0x2f4544, - 0x2f4b4a, - 0x2f8248, - 0x2f9049, - 0x2c4bc6, - 0x2b9545, - 0x34d7c8, - 0x2cdc4a, - 0x20ec43, - 0x333e46, - 0x2f1bc7, - 0x225f45, - 0x3b3505, - 0x3a04c3, - 0x231944, - 0x230645, - 0x28a287, - 0x307b05, - 0x2ef086, - 0x103d45, - 0x273bc3, - 0x273bc9, - 0x26c04c, - 0x2a2b4c, - 0x2d8648, - 0x284187, - 0x301e08, - 0x30214a, - 0x302fcb, - 0x28b7c8, - 0x23ec48, - 0x23f486, - 0x345045, - 0x34624a, - 0x228cc5, - 0x205082, - 0x2cbd87, - 0x29f806, - 0x368d45, - 0x304209, - 0x281405, - 0x3716c5, - 0x218ac9, - 0x388a46, - 0x204448, - 0x332643, - 0x217186, - 0x27cf86, - 0x311f05, - 0x311f09, - 0x2f0fc9, - 0x27a3c7, - 0x114204, - 0x314207, - 0x226ec9, - 0x23f805, - 0x444c8, - 0x39c485, - 0x341a05, - 0x3911c9, - 0x20cac2, - 0x2628c4, - 0x200882, - 0x204182, - 0x30e985, - 0x312108, - 0x2bc805, - 0x2cb603, - 0x2cb605, - 0x2da983, - 0x2162c2, - 0x383c84, - 0x2fc183, - 0x20cb42, - 0x341504, - 0x2ec043, - 0x206682, - 0x28cfc3, - 0x295384, - 0x2eae03, - 0x2f6584, - 0x204242, - 0x221383, - 0x219c43, - 0x206182, - 0x332182, - 0x2f0e09, - 0x204382, - 0x290d84, - 0x201f82, - 0x34d644, - 0x28bb04, - 0x2c0d84, - 0x202642, - 0x23e882, - 0x229703, - 0x302d83, - 0x24a9c4, - 0x28a404, - 0x2f1d44, - 0x2f8404, - 0x315743, - 0x224183, - 0x20c4c4, - 0x315584, - 0x315d86, - 0x232ec2, - 0x20f882, - 0x23cac3, - 0x323043, - 0x208e83, - 0x201a03, - 0x207102, - 0x39c783, - 0x238543, - 0x23cac3, - 0x201843, - 0x323043, - 0x231604, - 0x2f10c4, - 0x21bf84, - 0x208e83, - 0x201a03, - 0x221483, - 0x2f5204, - 0x31fe83, - 0x2c37c3, - 0x359e44, - 0x39c286, - 0x211c43, - 0x36dc7, - 0x21f243, - 0x202103, - 0x2b8d83, - 0x263a43, - 0x229443, - 0x3321c5, - 0x238543, - 0x23cac3, - 0x323043, - 0x208e83, - 0x201a03, - 0x216403, - 0x239043, - 0x16fb88, - 0x238543, - 0x23cac3, - 0x323043, - 0x255783, - 0x208e83, - 0x2464c4, - 0x1a3443, - 0x201a03, - 0x25b0c4, - 0x2c6c85, - 0x36dc7, - 0x20f882, - 0x201742, - 0x207b02, - 0x204d42, - 0xe03, - 0x200442, - 0x238543, - 0x240244, - 0x23cac3, - 0x323043, - 0x28cac3, - 0x208e83, - 0x201a03, - 0x16fb88, - 0x238543, - 0x23cac3, - 0x323043, - 0x28cac3, - 0x21bf84, - 0x208e83, - 0xe03, - 0x201a03, - 0x215443, - 0x286644, - 0x16fb88, - 0x238543, - 0x200e03, - 0x10c3, - 0x13e8c4, - 0x252044, - 0x16fb88, - 0x238543, - 0x253384, - 0x231604, - 0x200e03, - 0x2014c2, - 0x201a03, - 0x20c843, - 0x31944, - 0x355685, - 0x205082, - 0x3156c3, - 0x145c49, - 0xdfb46, - 0x19c588, - 0x207102, - 0x16fb88, - 0x20f882, - 0x23cac3, - 0x323043, - 0x200942, - 0xe03, - 0x201a03, - 0x207102, - 0x1bea07, - 0x1370c9, - 0x3dc3, - 0x16fb88, - 0xd303, - 0x5db4c807, - 0x38543, - 0x1788, - 0x23cac3, - 0x323043, - 0x186c46, - 0x255783, - 0xe8888, - 0xc9148, - 0x3fbc6, - 0x28cac3, - 0xd30c8, - 0x187ec3, - 0xe8a85, - 0x3ccc7, - 0x8e83, - 0x63c3, - 0x1a03, - 0xcb02, - 0x17044a, - 0x10ea43, - 0x313e44, - 0x10f30b, - 0x10f8c8, - 0x95e02, - 0x207102, - 0x20f882, - 0x238543, - 0x23cac3, - 0x2de944, - 0x323043, - 0x255783, - 0x28cac3, - 0x208e83, - 0x238543, - 0x23cac3, - 0x323043, - 0x229443, - 0x208e83, - 0x201a03, - 0x236903, - 0x215443, - 0x238543, - 0x23cac3, - 0x323043, - 0x208e83, - 0x201a03, - 
0x238543, - 0x23cac3, - 0x323043, - 0x208e83, - 0x201a03, - 0x10c3, - 0x238543, - 0x23cac3, - 0x323043, - 0x231604, - 0x229443, - 0x208e83, - 0x201a03, - 0x21a902, - 0x200141, - 0x207102, - 0x200001, - 0x327e02, - 0x16fb88, - 0x224c85, - 0x2008c1, - 0x38543, - 0x201781, - 0x200301, - 0x200081, - 0x2ac602, - 0x37cc44, - 0x394483, - 0x200181, - 0x200401, - 0x200041, - 0x200101, - 0x2ea547, - 0x2ec54f, - 0x2fbc06, - 0x200281, - 0x33e906, - 0x200801, - 0x200981, - 0x306f8e, - 0x200441, - 0x201a03, - 0x204101, - 0x258885, - 0x20cb02, - 0x3a03c5, - 0x200341, - 0x200741, - 0x2002c1, - 0x205082, - 0x2000c1, - 0x200201, - 0x200c81, - 0x2005c1, - 0x204541, - 0x16fb88, - 0x238543, - 0x23cac3, - 0x323043, - 0x208e83, - 0x201a03, - 0x221f43, - 0x238543, - 0x323043, - 0x95d48, - 0x28cac3, - 0x208e83, - 0x31483, - 0x201a03, - 0x14eec08, - 0x16308, - 0x16fb88, - 0xe03, - 0x8e444, - 0x4ec04, - 0x14eec0a, - 0x16fb88, - 0x1a3443, - 0x238543, - 0x23cac3, - 0x323043, - 0x208e83, - 0x201a03, - 0x203ec3, - 0x16fb88, - 0x238543, - 0x23cac3, - 0x2de944, - 0x201a03, - 0x22d585, - 0x35f2c4, - 0x238543, - 0x208e83, - 0x201a03, - 0x1f40a, - 0xf1844, - 0x118b06, - 0x20f882, - 0x238543, - 0x23adc9, - 0x23cac3, - 0x375449, - 0x323043, - 0x28cac3, - 0x208e83, - 0x201a03, - 0x2f4348, - 0x22dc07, - 0x355685, - 0xb4c8, - 0x1bea07, - 0x2f78a, - 0x178ccb, - 0x13c507, - 0x4a4c8, - 0x14f64a, - 0x19dc8, - 0x1370c9, - 0x30507, - 0x742c7, - 0x19bf08, - 0x1788, - 0x4b04f, - 0x1c045, - 0x1a87, - 0x186c46, - 0x41287, - 0x4a786, - 0xe8888, - 0x96fc6, - 0x188847, - 0x178809, - 0x1bf307, - 0xd81c9, - 0xbcbc9, - 0xc6a06, - 0xc9148, - 0xc7845, - 0x57b0a, - 0xd30c8, - 0x187ec3, - 0xdad48, - 0x3ccc7, - 0x131f45, - 0x787d0, - 0x63c3, - 0x1a3443, - 0x125807, - 0x1cc85, - 0xf02c8, - 0xe385, - 0x10ea43, - 0x16d5c8, - 0x12906, - 0x198909, - 0xb2007, - 0x145f0b, - 0x180884, - 0x104f04, - 0x10f30b, - 0x10f8c8, - 0x110547, - 0x131645, - 0x238543, - 0x23cac3, - 0x21b583, - 0x201a03, - 0x20c743, - 0x323043, - 0x1a3443, - 0x238543, - 0x23cac3, - 0x323043, - 0x28cac3, - 0x208e83, - 0x201a03, - 0x15d4cb, - 0x207102, - 0x20f882, - 0x201a03, - 0x16fb88, - 0x207102, - 0x20f882, - 0x207b02, - 0x200942, - 0x20b302, - 0x208e83, - 0x200442, - 0x207102, - 0x39c783, - 0x20f882, - 0x238543, - 0x23cac3, - 0x207b02, - 0x323043, - 0x255783, - 0x28cac3, - 0x21bf84, - 0x208e83, - 0x21eb43, - 0x201a03, - 0x313e44, - 0x202443, - 0x323043, - 0x20f882, - 0x238543, - 0x23cac3, - 0x323043, - 0x28cac3, - 0x208e83, - 0x200e03, - 0x201a03, - 0x3ad3c7, - 0x238543, - 0x282c07, - 0x2d7f86, - 0x20e583, - 0x207603, - 0x323043, - 0x204c03, - 0x231604, - 0x2d5204, - 0x30e706, - 0x20bd43, - 0x208e83, - 0x201a03, - 0x22d585, - 0x321704, - 0x350503, - 0x39b4c3, - 0x2cbd87, - 0x342d45, - 0x238543, - 0x23cac3, - 0x323043, - 0x28cac3, - 0x208e83, - 0x201a03, - 0x99807, - 0x203402, - 0x28f283, - 0x205403, - 0x39c783, - 0x65e38543, - 0x206902, - 0x23cac3, - 0x2020c3, - 0x323043, - 0x231604, - 0x3797c3, - 0x2e3b03, - 0x28cac3, - 0x21bf84, - 0x6620ea42, - 0x208e83, - 0x201a03, - 0x206683, - 0x22e603, - 0x21a902, - 0x202443, - 0x16fb88, - 0x323043, - 0x10c3, - 0x31f944, - 0x39c783, - 0x20f882, - 0x238543, - 0x240244, - 0x23cac3, - 0x323043, - 0x231604, - 0x255783, - 0x3a2e44, - 0x20f644, - 0x20c0c6, - 0x21bf84, - 0x208e83, - 0x201a03, - 0x221483, - 0x29f806, - 0x4504b, - 0x24386, - 0x3204a, - 0x112d0a, - 0x16fb88, - 0x214f44, - 0x67638543, - 0x39c744, - 0x23cac3, - 0x259004, - 0x323043, - 0x210543, - 0x28cac3, - 0x208e83, - 0x1a3443, - 0x201a03, - 0xbac3, - 0x3381cb, - 0x3af10a, - 0x3bf84c, - 
0xe4288, - 0x207102, - 0x20f882, - 0x207b02, - 0x2b13c5, - 0x231604, - 0x204a82, - 0x28cac3, - 0x20f644, - 0x204d42, - 0x200442, - 0x20d2c2, - 0x21a902, - 0x19c783, - 0x35f42, - 0x2b3509, - 0x2f7148, - 0x351689, - 0x2410c9, - 0x350f0a, - 0x26080a, - 0x2127c2, - 0x222902, - 0xf882, - 0x238543, - 0x229682, - 0x24af46, - 0x369d02, - 0x206a42, - 0x37904e, - 0x2213ce, - 0x284b47, - 0x208e07, - 0x2ec8c2, - 0x23cac3, - 0x323043, - 0x200042, - 0x200942, - 0x31603, - 0x23980f, - 0x20b542, - 0x2dd887, - 0x2b4a87, - 0x2b7e87, - 0x31a4cc, - 0x2c448c, - 0x223984, - 0x285b0a, - 0x221302, - 0x209a02, - 0x2c0884, - 0x21f502, - 0x2ca102, - 0x2c46c4, - 0x21a602, - 0x200282, - 0x11a83, - 0x297047, - 0x2beb05, - 0x20d842, - 0x239784, - 0x382842, - 0x2e3008, - 0x208e83, - 0x203488, - 0x203cc2, - 0x223b45, - 0x38dbc6, - 0x201a03, - 0x207082, - 0x2f0ac7, - 0xcb02, - 0x2797c5, - 0x358b85, - 0x209642, - 0x20fd02, - 0x2cf9ca, - 0x26eeca, - 0x21b9c2, - 0x2a4dc4, - 0x2002c2, - 0x3b4ec8, - 0x20d582, - 0x315b08, - 0x30ab47, - 0x30ba09, - 0x203442, - 0x310e45, - 0x3044c5, - 0x21770b, - 0x2d054c, - 0x237348, - 0x321b08, - 0x232ec2, - 0x208a02, - 0x207102, - 0x16fb88, - 0x20f882, - 0x238543, - 0x207b02, - 0x204d42, - 0xe03, - 0x200442, - 0x201a03, - 0x20d2c2, - 0x207102, - 0x68a0f882, - 0x68f23043, - 0x211a83, - 0x204a82, - 0x208e83, - 0x391783, - 0x201a03, - 0x2ef783, - 0x37f186, - 0x1615443, - 0x16fb88, - 0x11205, - 0xae90d, - 0xacc8a, - 0x6e487, - 0x69601e02, - 0x69a00242, - 0x69e00bc2, - 0x6a200702, - 0x6a60b5c2, - 0x6aa01382, - 0x36dc7, - 0x6ae0f882, - 0x6b20c8c2, - 0x6b604842, - 0x6ba04c82, - 0x2213c3, - 0x18ec4, - 0x2298c3, - 0x6be1d882, - 0x6c200182, - 0x53c47, - 0x6c60a442, - 0x6ca00782, - 0x6ce01bc2, - 0x6d205e82, - 0x6d601c82, - 0x6da00942, - 0xc2845, - 0x23ef43, - 0x281a04, - 0x6de1f502, - 0x6e205242, - 0x6e603582, - 0x17d50b, - 0x6ea01fc2, - 0x6f253442, - 0x6f604a82, - 0x6fa0b302, - 0x6fe14702, - 0x70200802, - 0x70614642, - 0x70a745c2, - 0x70e0ea42, - 0x71204802, - 0x71604d42, - 0x71a03382, - 0x71e08682, - 0x7224d382, - 0x1a3284, - 0x35efc3, - 0x72604f82, - 0x72a10902, - 0x72e11542, - 0x73201f02, - 0x73600442, - 0x73a0cb42, - 0x15d647, - 0x73e04102, - 0x74204142, - 0x7460d2c2, - 0x74a21382, - 0x1ad70c, - 0x74e2a202, - 0x75245542, - 0x75605942, - 0x75a06442, - 0x75e0c402, - 0x76260982, - 0x76600202, - 0x76a16fc2, - 0x76e7d302, - 0x772610c2, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x12143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x6ef797c3, - 0x212143, - 0x332244, - 0x2f7046, - 0x2f9a03, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x244949, - 0x235f42, - 0x26c783, - 0x2bcec3, - 0x20fbc5, - 0x2020c3, - 0x3797c3, - 0x212143, - 0x20c0c3, - 0x248d43, - 0x242989, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 
0x212143, - 0x235f42, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x3797c3, - 0x212143, - 0x235f42, - 0x235f42, - 0x3797c3, - 0x212143, - 0x77a38543, - 0x23cac3, - 0x20a6c3, - 0x28cac3, - 0x208e83, - 0xe03, - 0x201a03, - 0x16fb88, - 0x20f882, - 0x238543, - 0x208e83, - 0x201a03, - 0x238543, - 0x23cac3, - 0x323043, - 0x28cac3, - 0x208e83, - 0xe03, - 0x201a03, - 0x252044, - 0x20f882, - 0x238543, - 0x345903, - 0x23cac3, - 0x253384, - 0x21b583, - 0x323043, - 0x231604, - 0x255783, - 0x28cac3, - 0x208e83, - 0x201a03, - 0x20c843, - 0x355685, - 0x248d43, - 0x202443, - 0xe03, - 0x20f882, - 0x238543, - 0x3797c3, - 0x208e83, - 0x201a03, - 0x207102, - 0x39c783, - 0x16fb88, - 0x238543, - 0x23cac3, - 0x323043, - 0x23a1c6, - 0x231604, - 0x255783, - 0x21bf84, - 0x208e83, - 0x201a03, - 0x221483, - 0x238543, - 0x23cac3, - 0x208e83, - 0x201a03, - 0x1442047, - 0x238543, - 0x24386, - 0x23cac3, - 0x323043, - 0xe5586, - 0x208e83, - 0x201a03, - 0x31dc48, - 0x321949, - 0x330189, - 0x33bb08, - 0x38fb48, - 0x38fb49, - 0x24558d, - 0x24dd8f, - 0x2f53d0, - 0x35648d, - 0x37210c, - 0x39064b, - 0xba9c8, - 0xac605, - 0x207102, - 0x342b85, - 0x200243, - 0x7ae0f882, - 0x23cac3, - 0x323043, - 0x2d8c47, - 0x263a43, - 0x28cac3, - 0x208e83, - 0x21b543, - 0x217e03, - 0x200e03, - 0x201a03, - 0x3821c6, - 0x205082, - 0x202443, - 0x16fb88, - 0x207102, - 0x39c783, - 0x20f882, - 0x238543, - 0x23cac3, - 0x323043, - 0x231604, - 0x28cac3, - 0x208e83, - 0x201a03, - 0x215443, - 0x106904, - 0x15217c6, - 0x207102, - 0x20f882, - 0x323043, - 0x28cac3, - 0x201a03, -} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. 
-// -// The layout within the uint32, from MSB to LSB, is: -// [ 1 bits] unused -// [ 1 bits] wildcard bit -// [ 2 bits] node type -// [14 bits] high nodes index (exclusive) of children -// [14 bits] low nodes index (inclusive) of children -var children = [...]uint32{ - 0x0, - 0x10000000, - 0x20000000, - 0x40000000, - 0x50000000, - 0x60000000, - 0x186c615, - 0x187061b, - 0x189461c, - 0x19f0625, - 0x1a0467c, - 0x1a18681, - 0x1a2c686, - 0x1a4c68b, - 0x1a50693, - 0x1a68694, - 0x1a9069a, - 0x1a946a4, - 0x1aac6a5, - 0x1ab06ab, - 0x1ab46ac, - 0x1af06ad, - 0x1af46bc, - 0x21afc6bd, - 0x1b446bf, - 0x1b486d1, - 0x1b686d2, - 0x1b7c6da, - 0x1b806df, - 0x1bb06e0, - 0x1bcc6ec, - 0x1bf46f3, - 0x1c006fd, - 0x1c04700, - 0x1c9c701, - 0x1cb0727, - 0x1cc472c, - 0x1cf4731, - 0x1d0473d, - 0x1d18741, - 0x1d3c746, - 0x1e7474f, - 0x1e7879d, - 0x1ee479e, - 0x1f507b9, - 0x1f687d4, - 0x1f7c7da, - 0x1f847df, - 0x1f987e1, - 0x1f9c7e6, - 0x1fb87e7, - 0x20047ee, - 0x2020801, - 0x2024808, - 0x2028809, - 0x204480a, - 0x2080811, - 0x62084820, - 0x209c821, - 0x20b4827, - 0x20b882d, - 0x20c882e, - 0x2178832, - 0x217c85e, - 0x2218c85f, - 0x22190863, - 0x22194864, - 0x21cc865, - 0x21d0873, - 0x2658874, - 0x226f8996, - 0x226fc9be, - 0x227009bf, - 0x2270c9c0, - 0x227109c3, - 0x2271c9c4, - 0x227209c7, - 0x227249c8, - 0x227289c9, - 0x2272c9ca, - 0x227309cb, - 0x2273c9cc, - 0x227409cf, - 0x2274c9d0, - 0x227509d3, - 0x227549d4, - 0x227589d5, - 0x227649d6, - 0x227689d9, - 0x2276c9da, - 0x227709db, - 0x27749dc, - 0x227789dd, - 0x227849de, - 0x227889e1, - 0x27909e2, - 0x27cc9e4, - 0x227ec9f3, - 0x227f09fb, - 0x227f49fc, - 0x27f89fd, - 0x227fc9fe, - 0x28009ff, - 0x281ca00, - 0x2834a07, - 0x2838a0d, - 0x2848a0e, - 0x2854a12, - 0x2888a15, - 0x288ca22, - 0x28a0a23, - 0x228a8a28, - 0x2968a2a, - 0x2296ca5a, - 0x2974a5b, - 0x2978a5d, - 0x2990a5e, - 0x29a4a64, - 0x29cca69, - 0x29eca73, - 0x2a1ca7b, - 0x2a44a87, - 0x2a48a91, - 0x2a6ca92, - 0x2a70a9b, - 0x2a84a9c, - 0x2a88aa1, - 0x2a8caa2, - 0x2aacaa3, - 0x2ac8aab, - 0x2accab2, - 0x22ad0ab3, - 0x2ad4ab4, - 0x2ad8ab5, - 0x2ae8ab6, - 0x2aecaba, - 0x2b64abb, - 0x2b68ad9, - 0x2b84ada, - 0x2b94ae1, - 0x2ba8ae5, - 0x2bc0aea, - 0x2bd8af0, - 0x2bf0af6, - 0x2bf4afc, - 0x2c0cafd, - 0x2c28b03, - 0x2c48b0a, - 0x2c60b12, - 0x2cc0b18, - 0x2cdcb30, - 0x2ce4b37, - 0x2ce8b39, - 0x2cfcb3a, - 0x2d40b3f, - 0x2dc0b50, - 0x2decb70, - 0x2df0b7b, - 0x2df8b7c, - 0x2e18b7e, - 0x2e1cb86, - 0x2e40b87, - 0x2e48b90, - 0x2e84b92, - 0x2ec8ba1, - 0x2eccbb2, - 0x2f34bb3, - 0x2f38bcd, - 0x22f3cbce, - 0x22f40bcf, - 0x22f50bd0, - 0x22f54bd4, - 0x22f58bd5, - 0x22f5cbd6, - 0x22f60bd7, - 0x2f78bd8, - 0x2f9cbde, - 0x2fbcbe7, - 0x3580bef, - 0x358cd60, - 0x35acd63, - 0x3768d6b, - 0x3838dda, - 0x38a8e0e, - 0x3900e2a, - 0x39e8e40, - 0x3a40e7a, - 0x3a7ce90, - 0x3b78e9f, - 0x3c44ede, - 0x3cdcf11, - 0x3d6cf37, - 0x3dd0f5b, - 0x4008f74, - 0x40c1002, - 0x418d030, - 0x41d9063, - 0x4261076, - 0x429d098, - 0x42ed0a7, - 0x43650bb, - 0x643690d9, - 0x6436d0da, - 0x643710db, - 0x43ed0dc, - 0x44490fb, - 0x44c5112, - 0x453d131, - 0x45bd14f, - 0x462916f, - 0x475518a, - 0x47ad1d5, - 0x647b11eb, - 0x48491ec, - 0x48d1212, - 0x491d234, - 0x4985247, - 0x4a2d261, - 0x4af528b, - 0x4b5d2bd, - 0x4c712d7, - 0x64c7531c, - 0x64c7931d, - 0x4cd531e, - 0x4d31335, - 0x4dc134c, - 0x4e3d370, - 0x4e8138f, - 0x4f653a0, - 0x4f993d9, - 0x4ff93e6, - 0x506d3fe, - 0x50f541b, - 0x513543d, - 0x51a544d, - 0x651a9469, - 0x651ad46a, - 0x251b146b, - 0x51c946c, - 0x51e5472, - 0x5229479, - 0x523948a, - 0x525148e, - 0x52c9494, - 0x52d14b2, - 0x52e54b4, - 0x53014b9, - 0x532d4c0, - 
0x53314cb, - 0x53394cc, - 0x534d4ce, - 0x53694d3, - 0x53754da, - 0x537d4dd, - 0x53b94df, - 0x53cd4ee, - 0x53d54f3, - 0x53e14f5, - 0x53e94f8, - 0x540d4fa, - 0x5431503, - 0x544950c, - 0x544d512, - 0x5455513, - 0x5459515, - 0x54c1516, - 0x54c5530, - 0x54e9531, - 0x550d53a, - 0x5529543, - 0x553954a, - 0x554d54e, - 0x5551553, - 0x5559554, - 0x556d556, - 0x557d55b, - 0x558155f, - 0x559d560, - 0x5e2d567, - 0x5e6578b, - 0x5e91799, - 0x5ead7a4, - 0x5ecd7ab, - 0x5eed7b3, - 0x5f317bb, - 0x5f397cc, - 0x25f3d7ce, - 0x25f417cf, - 0x5f497d0, - 0x60c17d2, - 0x260c5830, - 0x260d5831, - 0x260dd835, - 0x260e9837, - 0x60ed83a, - 0x60f183b, - 0x611983c, - 0x6141846, - 0x6145850, - 0x617d851, - 0x619985f, - 0x6cf1866, - 0x6cf5b3c, - 0x6cf9b3d, - 0x26cfdb3e, - 0x6d01b3f, - 0x26d05b40, - 0x6d09b41, - 0x26d15b42, - 0x6d19b45, - 0x6d1db46, - 0x26d21b47, - 0x6d25b48, - 0x26d2db49, - 0x6d31b4b, - 0x6d35b4c, - 0x26d45b4d, - 0x6d49b51, - 0x6d4db52, - 0x6d51b53, - 0x6d55b54, - 0x26d59b55, - 0x6d5db56, - 0x6d61b57, - 0x6d65b58, - 0x6d69b59, - 0x26d71b5a, - 0x6d75b5c, - 0x6d79b5d, - 0x6d7db5e, - 0x26d81b5f, - 0x6d85b60, - 0x26d8db61, - 0x26d91b63, - 0x6dadb64, - 0x6dbdb6b, - 0x6e01b6f, - 0x6e05b80, - 0x6e29b81, - 0x6e2db8a, - 0x6e31b8b, - 0x6fbdb8c, - 0x26fc1bef, - 0x26fc9bf0, - 0x26fcdbf2, - 0x26fd1bf3, - 0x6fd9bf4, - 0x70b5bf6, - 0x270b9c2d, - 0x70bdc2e, - 0x70e9c2f, - 0x70edc3a, - 0x7111c3b, - 0x711dc44, - 0x713dc47, - 0x7141c4f, - 0x7179c50, - 0x7411c5e, - 0x74cdd04, - 0x74e1d33, - 0x7515d38, - 0x7545d45, - 0x7561d51, - 0x7589d58, - 0x75a9d62, - 0x75c5d6a, - 0x75edd71, - 0x75fdd7b, - 0x7601d7f, - 0x7605d80, - 0x7639d81, - 0x7645d8e, - 0x7665d91, - 0x76ddd99, - 0x276e1db7, - 0x7705db8, - 0x7725dc1, - 0x7739dc9, - 0x774ddce, - 0x7751dd3, - 0x7771dd4, - 0x7815ddc, - 0x7831e05, - 0x7855e0c, - 0x785de15, - 0x7869e17, - 0x7871e1a, - 0x7885e1c, - 0x78a5e21, - 0x78b1e29, - 0x78bde2c, - 0x78ede2f, - 0x79c1e3b, - 0x79c5e70, - 0x79d9e71, - 0x79e1e76, - 0x79f9e78, - 0x79fde7e, - 0x7a09e7f, - 0x7a0de82, - 0x7a29e83, - 0x7a65e8a, - 0x7a69e99, - 0x7a89e9a, - 0x7ad9ea2, - 0x7af5eb6, - 0x7b49ebd, - 0x7b4ded2, - 0x7b51ed3, - 0x7b55ed4, - 0x7b99ed5, - 0x7ba9ee6, - 0x7be9eea, - 0x7bedefa, - 0x7c1defb, - 0x7d65f07, - 0x7d8df59, - 0x7db9f63, - 0x7dc5f6e, - 0x7dcdf71, - 0x7eddf73, - 0x7ee9fb7, - 0x7ef5fba, - 0x7f01fbd, - 0x7f0dfc0, - 0x7f19fc3, - 0x7f25fc6, - 0x7f31fc9, - 0x7f3dfcc, - 0x7f49fcf, - 0x7f55fd2, - 0x7f61fd5, - 0x7f6dfd8, - 0x7f79fdb, - 0x7f81fde, - 0x7f8dfe0, - 0x7f99fe3, - 0x7fa5fe6, - 0x7fb1fe9, - 0x7fbdfec, - 0x7fc9fef, - 0x7fd5ff2, - 0x7fe1ff5, - 0x7fedff8, - 0x7ff9ffb, - 0x8005ffe, - 0x8032001, - 0x803e00c, - 0x804a00f, - 0x8056012, - 0x8062015, - 0x806e018, - 0x807601b, - 0x808201d, - 0x808e020, - 0x809a023, - 0x80a6026, - 0x80b2029, - 0x80be02c, - 0x80ca02f, - 0x80d6032, - 0x80e2035, - 0x80ee038, - 0x80fa03b, - 0x810603e, - 0x8112041, - 0x811a044, - 0x8126046, - 0x8132049, - 0x813e04c, - 0x814a04f, - 0x8156052, - 0x8162055, - 0x816e058, - 0x817a05b, - 0x817e05e, - 0x818a05f, - 0x81a6062, - 0x81aa069, - 0x81ba06a, - 0x81d606e, - 0x821a075, - 0x821e086, - 0x8232087, - 0x826608c, - 0x8276099, - 0x829609d, - 0x82ae0a5, - 0x82c60ab, - 0x82ce0b1, - 0x283120b3, - 0x83160c4, - 0x83420c5, - 0x834a0d0, - 0x835e0d2, -} - -// max children 494 (capacity 511) -// max text offset 28750 (capacity 32767) -// max text length 36 (capacity 63) -// max hi 8407 (capacity 16383) -// max lo 8402 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go deleted file mode 
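The comment on the deleted children table above documents how each uint32 entry packs its fields: from MSB to LSB, one unused bit, one wildcard bit, two bits of node type, a 14-bit exclusive high child index and a 14-bit inclusive low child index. A minimal Go sketch of unpacking one such entry, assuming only the field widths quoted in that comment (the function and variable names are illustrative, not part of the vendored package's API):

    package main

    import "fmt"

    // decodeChild unpacks one entry of the children table following the layout
    // documented above: bits 0-13 hold the inclusive low child index, bits 14-27
    // the exclusive high child index, bits 28-29 the node type, bit 30 the
    // wildcard flag, and bit 31 is unused. decodeChild is an illustrative helper,
    // not an exported function of the vendored package.
    func decodeChild(c uint32) (lo, hi, nodeType uint32, wildcard bool) {
    	lo = c & 0x3fff
    	hi = (c >> 14) & 0x3fff
    	nodeType = (c >> 28) & 0x3
    	wildcard = (c>>30)&1 == 1
    	return
    }

    func main() {
    	// 0x21afc6bd is one of the values in the table above.
    	lo, hi, nodeType, wildcard := decodeChild(0x21afc6bd)
    	fmt.Printf("lo=%d hi=%d type=%d wildcard=%v\n", lo, hi, nodeType, wildcard)
    }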
100644 index 62610185..00000000 --- a/vendor/golang.org/x/net/publicsuffix/table_test.go +++ /dev/null @@ -1,16756 +0,0 @@ -// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -var rules = [...]string{ - "ac", - "com.ac", - "edu.ac", - "gov.ac", - "net.ac", - "mil.ac", - "org.ac", - "ad", - "nom.ad", - "ae", - "co.ae", - "net.ae", - "org.ae", - "sch.ae", - "ac.ae", - "gov.ae", - "mil.ae", - "aero", - "accident-investigation.aero", - "accident-prevention.aero", - "aerobatic.aero", - "aeroclub.aero", - "aerodrome.aero", - "agents.aero", - "aircraft.aero", - "airline.aero", - "airport.aero", - "air-surveillance.aero", - "airtraffic.aero", - "air-traffic-control.aero", - "ambulance.aero", - "amusement.aero", - "association.aero", - "author.aero", - "ballooning.aero", - "broker.aero", - "caa.aero", - "cargo.aero", - "catering.aero", - "certification.aero", - "championship.aero", - "charter.aero", - "civilaviation.aero", - "club.aero", - "conference.aero", - "consultant.aero", - "consulting.aero", - "control.aero", - "council.aero", - "crew.aero", - "design.aero", - "dgca.aero", - "educator.aero", - "emergency.aero", - "engine.aero", - "engineer.aero", - "entertainment.aero", - "equipment.aero", - "exchange.aero", - "express.aero", - "federation.aero", - "flight.aero", - "freight.aero", - "fuel.aero", - "gliding.aero", - "government.aero", - "groundhandling.aero", - "group.aero", - "hanggliding.aero", - "homebuilt.aero", - "insurance.aero", - "journal.aero", - "journalist.aero", - "leasing.aero", - "logistics.aero", - "magazine.aero", - "maintenance.aero", - "media.aero", - "microlight.aero", - "modelling.aero", - "navigation.aero", - "parachuting.aero", - "paragliding.aero", - "passenger-association.aero", - "pilot.aero", - "press.aero", - "production.aero", - "recreation.aero", - "repbody.aero", - "res.aero", - "research.aero", - "rotorcraft.aero", - "safety.aero", - "scientist.aero", - "services.aero", - "show.aero", - "skydiving.aero", - "software.aero", - "student.aero", - "trader.aero", - "trading.aero", - "trainer.aero", - "union.aero", - "workinggroup.aero", - "works.aero", - "af", - "gov.af", - "com.af", - "org.af", - "net.af", - "edu.af", - "ag", - "com.ag", - "org.ag", - "net.ag", - "co.ag", - "nom.ag", - "ai", - "off.ai", - "com.ai", - "net.ai", - "org.ai", - "al", - "com.al", - "edu.al", - "gov.al", - "mil.al", - "net.al", - "org.al", - "am", - "ao", - "ed.ao", - "gv.ao", - "og.ao", - "co.ao", - "pb.ao", - "it.ao", - "aq", - "ar", - "com.ar", - "edu.ar", - "gob.ar", - "gov.ar", - "int.ar", - "mil.ar", - "musica.ar", - "net.ar", - "org.ar", - "tur.ar", - "arpa", - "e164.arpa", - "in-addr.arpa", - "ip6.arpa", - "iris.arpa", - "uri.arpa", - "urn.arpa", - "as", - "gov.as", - "asia", - "at", - "ac.at", - "co.at", - "gv.at", - "or.at", - "au", - "com.au", - "net.au", - "org.au", - "edu.au", - "gov.au", - "asn.au", - "id.au", - "info.au", - "conf.au", - "oz.au", - "act.au", - "nsw.au", - "nt.au", - "qld.au", - "sa.au", - "tas.au", - "vic.au", - "wa.au", - "act.edu.au", - "nsw.edu.au", - "nt.edu.au", - "qld.edu.au", - "sa.edu.au", - "tas.edu.au", - "vic.edu.au", - "wa.edu.au", - "qld.gov.au", - "sa.gov.au", - "tas.gov.au", - "vic.gov.au", - "wa.gov.au", - "aw", - "com.aw", - "ax", - "az", - "com.az", - "net.az", - "int.az", - "gov.az", - "org.az", - "edu.az", - "info.az", - "pp.az", - "mil.az", - "name.az", - "pro.az", - "biz.az", - "ba", - "com.ba", - "edu.ba", - "gov.ba", - "mil.ba", - "net.ba", - "org.ba", - "bb", - "biz.bb", - "co.bb", - "com.bb", - "edu.bb", - 
"gov.bb", - "info.bb", - "net.bb", - "org.bb", - "store.bb", - "tv.bb", - "*.bd", - "be", - "ac.be", - "bf", - "gov.bf", - "bg", - "a.bg", - "b.bg", - "c.bg", - "d.bg", - "e.bg", - "f.bg", - "g.bg", - "h.bg", - "i.bg", - "j.bg", - "k.bg", - "l.bg", - "m.bg", - "n.bg", - "o.bg", - "p.bg", - "q.bg", - "r.bg", - "s.bg", - "t.bg", - "u.bg", - "v.bg", - "w.bg", - "x.bg", - "y.bg", - "z.bg", - "0.bg", - "1.bg", - "2.bg", - "3.bg", - "4.bg", - "5.bg", - "6.bg", - "7.bg", - "8.bg", - "9.bg", - "bh", - "com.bh", - "edu.bh", - "net.bh", - "org.bh", - "gov.bh", - "bi", - "co.bi", - "com.bi", - "edu.bi", - "or.bi", - "org.bi", - "biz", - "bj", - "asso.bj", - "barreau.bj", - "gouv.bj", - "bm", - "com.bm", - "edu.bm", - "gov.bm", - "net.bm", - "org.bm", - "*.bn", - "bo", - "com.bo", - "edu.bo", - "gov.bo", - "gob.bo", - "int.bo", - "org.bo", - "net.bo", - "mil.bo", - "tv.bo", - "br", - "adm.br", - "adv.br", - "agr.br", - "am.br", - "arq.br", - "art.br", - "ato.br", - "b.br", - "belem.br", - "bio.br", - "blog.br", - "bmd.br", - "cim.br", - "cng.br", - "cnt.br", - "com.br", - "coop.br", - "cri.br", - "def.br", - "ecn.br", - "eco.br", - "edu.br", - "emp.br", - "eng.br", - "esp.br", - "etc.br", - "eti.br", - "far.br", - "flog.br", - "floripa.br", - "fm.br", - "fnd.br", - "fot.br", - "fst.br", - "g12.br", - "ggf.br", - "gov.br", - "ac.gov.br", - "al.gov.br", - "am.gov.br", - "ap.gov.br", - "ba.gov.br", - "ce.gov.br", - "df.gov.br", - "es.gov.br", - "go.gov.br", - "ma.gov.br", - "mg.gov.br", - "ms.gov.br", - "mt.gov.br", - "pa.gov.br", - "pb.gov.br", - "pe.gov.br", - "pi.gov.br", - "pr.gov.br", - "rj.gov.br", - "rn.gov.br", - "ro.gov.br", - "rr.gov.br", - "rs.gov.br", - "sc.gov.br", - "se.gov.br", - "sp.gov.br", - "to.gov.br", - "imb.br", - "ind.br", - "inf.br", - "jampa.br", - "jor.br", - "jus.br", - "leg.br", - "lel.br", - "mat.br", - "med.br", - "mil.br", - "mp.br", - "mus.br", - "net.br", - "*.nom.br", - "not.br", - "ntr.br", - "odo.br", - "org.br", - "poa.br", - "ppg.br", - "pro.br", - "psc.br", - "psi.br", - "qsl.br", - "radio.br", - "rec.br", - "recife.br", - "slg.br", - "srv.br", - "taxi.br", - "teo.br", - "tmp.br", - "trd.br", - "tur.br", - "tv.br", - "vet.br", - "vix.br", - "vlog.br", - "wiki.br", - "zlg.br", - "bs", - "com.bs", - "net.bs", - "org.bs", - "edu.bs", - "gov.bs", - "bt", - "com.bt", - "edu.bt", - "gov.bt", - "net.bt", - "org.bt", - "bv", - "bw", - "co.bw", - "org.bw", - "by", - "gov.by", - "mil.by", - "com.by", - "of.by", - "bz", - "com.bz", - "net.bz", - "org.bz", - "edu.bz", - "gov.bz", - "ca", - "ab.ca", - "bc.ca", - "mb.ca", - "nb.ca", - "nf.ca", - "nl.ca", - "ns.ca", - "nt.ca", - "nu.ca", - "on.ca", - "pe.ca", - "qc.ca", - "sk.ca", - "yk.ca", - "gc.ca", - "cat", - "cc", - "cd", - "gov.cd", - "cf", - "cg", - "ch", - "ci", - "org.ci", - "or.ci", - "com.ci", - "co.ci", - "edu.ci", - "ed.ci", - "ac.ci", - "net.ci", - "go.ci", - "asso.ci", - "xn--aroport-bya.ci", - "int.ci", - "presse.ci", - "md.ci", - "gouv.ci", - "*.ck", - "!www.ck", - "cl", - "gov.cl", - "gob.cl", - "co.cl", - "mil.cl", - "cm", - "co.cm", - "com.cm", - "gov.cm", - "net.cm", - "cn", - "ac.cn", - "com.cn", - "edu.cn", - "gov.cn", - "net.cn", - "org.cn", - "mil.cn", - "xn--55qx5d.cn", - "xn--io0a7i.cn", - "xn--od0alg.cn", - "ah.cn", - "bj.cn", - "cq.cn", - "fj.cn", - "gd.cn", - "gs.cn", - "gz.cn", - "gx.cn", - "ha.cn", - "hb.cn", - "he.cn", - "hi.cn", - "hl.cn", - "hn.cn", - "jl.cn", - "js.cn", - "jx.cn", - "ln.cn", - "nm.cn", - "nx.cn", - "qh.cn", - "sc.cn", - "sd.cn", - "sh.cn", - "sn.cn", - "sx.cn", - "tj.cn", - 
"xj.cn", - "xz.cn", - "yn.cn", - "zj.cn", - "hk.cn", - "mo.cn", - "tw.cn", - "co", - "arts.co", - "com.co", - "edu.co", - "firm.co", - "gov.co", - "info.co", - "int.co", - "mil.co", - "net.co", - "nom.co", - "org.co", - "rec.co", - "web.co", - "com", - "coop", - "cr", - "ac.cr", - "co.cr", - "ed.cr", - "fi.cr", - "go.cr", - "or.cr", - "sa.cr", - "cu", - "com.cu", - "edu.cu", - "org.cu", - "net.cu", - "gov.cu", - "inf.cu", - "cv", - "cw", - "com.cw", - "edu.cw", - "net.cw", - "org.cw", - "cx", - "gov.cx", - "cy", - "ac.cy", - "biz.cy", - "com.cy", - "ekloges.cy", - "gov.cy", - "ltd.cy", - "name.cy", - "net.cy", - "org.cy", - "parliament.cy", - "press.cy", - "pro.cy", - "tm.cy", - "cz", - "de", - "dj", - "dk", - "dm", - "com.dm", - "net.dm", - "org.dm", - "edu.dm", - "gov.dm", - "do", - "art.do", - "com.do", - "edu.do", - "gob.do", - "gov.do", - "mil.do", - "net.do", - "org.do", - "sld.do", - "web.do", - "dz", - "com.dz", - "org.dz", - "net.dz", - "gov.dz", - "edu.dz", - "asso.dz", - "pol.dz", - "art.dz", - "ec", - "com.ec", - "info.ec", - "net.ec", - "fin.ec", - "k12.ec", - "med.ec", - "pro.ec", - "org.ec", - "edu.ec", - "gov.ec", - "gob.ec", - "mil.ec", - "edu", - "ee", - "edu.ee", - "gov.ee", - "riik.ee", - "lib.ee", - "med.ee", - "com.ee", - "pri.ee", - "aip.ee", - "org.ee", - "fie.ee", - "eg", - "com.eg", - "edu.eg", - "eun.eg", - "gov.eg", - "mil.eg", - "name.eg", - "net.eg", - "org.eg", - "sci.eg", - "*.er", - "es", - "com.es", - "nom.es", - "org.es", - "gob.es", - "edu.es", - "et", - "com.et", - "gov.et", - "org.et", - "edu.et", - "biz.et", - "name.et", - "info.et", - "net.et", - "eu", - "fi", - "aland.fi", - "*.fj", - "*.fk", - "fm", - "fo", - "fr", - "com.fr", - "asso.fr", - "nom.fr", - "prd.fr", - "presse.fr", - "tm.fr", - "aeroport.fr", - "assedic.fr", - "avocat.fr", - "avoues.fr", - "cci.fr", - "chambagri.fr", - "chirurgiens-dentistes.fr", - "experts-comptables.fr", - "geometre-expert.fr", - "gouv.fr", - "greta.fr", - "huissier-justice.fr", - "medecin.fr", - "notaires.fr", - "pharmacien.fr", - "port.fr", - "veterinaire.fr", - "ga", - "gb", - "gd", - "ge", - "com.ge", - "edu.ge", - "gov.ge", - "org.ge", - "mil.ge", - "net.ge", - "pvt.ge", - "gf", - "gg", - "co.gg", - "net.gg", - "org.gg", - "gh", - "com.gh", - "edu.gh", - "gov.gh", - "org.gh", - "mil.gh", - "gi", - "com.gi", - "ltd.gi", - "gov.gi", - "mod.gi", - "edu.gi", - "org.gi", - "gl", - "co.gl", - "com.gl", - "edu.gl", - "net.gl", - "org.gl", - "gm", - "gn", - "ac.gn", - "com.gn", - "edu.gn", - "gov.gn", - "org.gn", - "net.gn", - "gov", - "gp", - "com.gp", - "net.gp", - "mobi.gp", - "edu.gp", - "org.gp", - "asso.gp", - "gq", - "gr", - "com.gr", - "edu.gr", - "net.gr", - "org.gr", - "gov.gr", - "gs", - "gt", - "com.gt", - "edu.gt", - "gob.gt", - "ind.gt", - "mil.gt", - "net.gt", - "org.gt", - "*.gu", - "gw", - "gy", - "co.gy", - "com.gy", - "edu.gy", - "gov.gy", - "net.gy", - "org.gy", - "hk", - "com.hk", - "edu.hk", - "gov.hk", - "idv.hk", - "net.hk", - "org.hk", - "xn--55qx5d.hk", - "xn--wcvs22d.hk", - "xn--lcvr32d.hk", - "xn--mxtq1m.hk", - "xn--gmqw5a.hk", - "xn--ciqpn.hk", - "xn--gmq050i.hk", - "xn--zf0avx.hk", - "xn--io0a7i.hk", - "xn--mk0axi.hk", - "xn--od0alg.hk", - "xn--od0aq3b.hk", - "xn--tn0ag.hk", - "xn--uc0atv.hk", - "xn--uc0ay4a.hk", - "hm", - "hn", - "com.hn", - "edu.hn", - "org.hn", - "net.hn", - "mil.hn", - "gob.hn", - "hr", - "iz.hr", - "from.hr", - "name.hr", - "com.hr", - "ht", - "com.ht", - "shop.ht", - "firm.ht", - "info.ht", - "adult.ht", - "net.ht", - "pro.ht", - "org.ht", - "med.ht", - "art.ht", - 
"coop.ht", - "pol.ht", - "asso.ht", - "edu.ht", - "rel.ht", - "gouv.ht", - "perso.ht", - "hu", - "co.hu", - "info.hu", - "org.hu", - "priv.hu", - "sport.hu", - "tm.hu", - "2000.hu", - "agrar.hu", - "bolt.hu", - "casino.hu", - "city.hu", - "erotica.hu", - "erotika.hu", - "film.hu", - "forum.hu", - "games.hu", - "hotel.hu", - "ingatlan.hu", - "jogasz.hu", - "konyvelo.hu", - "lakas.hu", - "media.hu", - "news.hu", - "reklam.hu", - "sex.hu", - "shop.hu", - "suli.hu", - "szex.hu", - "tozsde.hu", - "utazas.hu", - "video.hu", - "id", - "ac.id", - "biz.id", - "co.id", - "desa.id", - "go.id", - "mil.id", - "my.id", - "net.id", - "or.id", - "sch.id", - "web.id", - "ie", - "gov.ie", - "il", - "ac.il", - "co.il", - "gov.il", - "idf.il", - "k12.il", - "muni.il", - "net.il", - "org.il", - "im", - "ac.im", - "co.im", - "com.im", - "ltd.co.im", - "net.im", - "org.im", - "plc.co.im", - "tt.im", - "tv.im", - "in", - "co.in", - "firm.in", - "net.in", - "org.in", - "gen.in", - "ind.in", - "nic.in", - "ac.in", - "edu.in", - "res.in", - "gov.in", - "mil.in", - "info", - "int", - "eu.int", - "io", - "com.io", - "iq", - "gov.iq", - "edu.iq", - "mil.iq", - "com.iq", - "org.iq", - "net.iq", - "ir", - "ac.ir", - "co.ir", - "gov.ir", - "id.ir", - "net.ir", - "org.ir", - "sch.ir", - "xn--mgba3a4f16a.ir", - "xn--mgba3a4fra.ir", - "is", - "net.is", - "com.is", - "edu.is", - "gov.is", - "org.is", - "int.is", - "it", - "gov.it", - "edu.it", - "abr.it", - "abruzzo.it", - "aosta-valley.it", - "aostavalley.it", - "bas.it", - "basilicata.it", - "cal.it", - "calabria.it", - "cam.it", - "campania.it", - "emilia-romagna.it", - "emiliaromagna.it", - "emr.it", - "friuli-v-giulia.it", - "friuli-ve-giulia.it", - "friuli-vegiulia.it", - "friuli-venezia-giulia.it", - "friuli-veneziagiulia.it", - "friuli-vgiulia.it", - "friuliv-giulia.it", - "friulive-giulia.it", - "friulivegiulia.it", - "friulivenezia-giulia.it", - "friuliveneziagiulia.it", - "friulivgiulia.it", - "fvg.it", - "laz.it", - "lazio.it", - "lig.it", - "liguria.it", - "lom.it", - "lombardia.it", - "lombardy.it", - "lucania.it", - "mar.it", - "marche.it", - "mol.it", - "molise.it", - "piedmont.it", - "piemonte.it", - "pmn.it", - "pug.it", - "puglia.it", - "sar.it", - "sardegna.it", - "sardinia.it", - "sic.it", - "sicilia.it", - "sicily.it", - "taa.it", - "tos.it", - "toscana.it", - "trentino-a-adige.it", - "trentino-aadige.it", - "trentino-alto-adige.it", - "trentino-altoadige.it", - "trentino-s-tirol.it", - "trentino-stirol.it", - "trentino-sud-tirol.it", - "trentino-sudtirol.it", - "trentino-sued-tirol.it", - "trentino-suedtirol.it", - "trentinoa-adige.it", - "trentinoaadige.it", - "trentinoalto-adige.it", - "trentinoaltoadige.it", - "trentinos-tirol.it", - "trentinostirol.it", - "trentinosud-tirol.it", - "trentinosudtirol.it", - "trentinosued-tirol.it", - "trentinosuedtirol.it", - "tuscany.it", - "umb.it", - "umbria.it", - "val-d-aosta.it", - "val-daosta.it", - "vald-aosta.it", - "valdaosta.it", - "valle-aosta.it", - "valle-d-aosta.it", - "valle-daosta.it", - "valleaosta.it", - "valled-aosta.it", - "valledaosta.it", - "vallee-aoste.it", - "valleeaoste.it", - "vao.it", - "vda.it", - "ven.it", - "veneto.it", - "ag.it", - "agrigento.it", - "al.it", - "alessandria.it", - "alto-adige.it", - "altoadige.it", - "an.it", - "ancona.it", - "andria-barletta-trani.it", - "andria-trani-barletta.it", - "andriabarlettatrani.it", - "andriatranibarletta.it", - "ao.it", - "aosta.it", - "aoste.it", - "ap.it", - "aq.it", - "aquila.it", - "ar.it", - "arezzo.it", - "ascoli-piceno.it", - 
"ascolipiceno.it", - "asti.it", - "at.it", - "av.it", - "avellino.it", - "ba.it", - "balsan.it", - "bari.it", - "barletta-trani-andria.it", - "barlettatraniandria.it", - "belluno.it", - "benevento.it", - "bergamo.it", - "bg.it", - "bi.it", - "biella.it", - "bl.it", - "bn.it", - "bo.it", - "bologna.it", - "bolzano.it", - "bozen.it", - "br.it", - "brescia.it", - "brindisi.it", - "bs.it", - "bt.it", - "bz.it", - "ca.it", - "cagliari.it", - "caltanissetta.it", - "campidano-medio.it", - "campidanomedio.it", - "campobasso.it", - "carbonia-iglesias.it", - "carboniaiglesias.it", - "carrara-massa.it", - "carraramassa.it", - "caserta.it", - "catania.it", - "catanzaro.it", - "cb.it", - "ce.it", - "cesena-forli.it", - "cesenaforli.it", - "ch.it", - "chieti.it", - "ci.it", - "cl.it", - "cn.it", - "co.it", - "como.it", - "cosenza.it", - "cr.it", - "cremona.it", - "crotone.it", - "cs.it", - "ct.it", - "cuneo.it", - "cz.it", - "dell-ogliastra.it", - "dellogliastra.it", - "en.it", - "enna.it", - "fc.it", - "fe.it", - "fermo.it", - "ferrara.it", - "fg.it", - "fi.it", - "firenze.it", - "florence.it", - "fm.it", - "foggia.it", - "forli-cesena.it", - "forlicesena.it", - "fr.it", - "frosinone.it", - "ge.it", - "genoa.it", - "genova.it", - "go.it", - "gorizia.it", - "gr.it", - "grosseto.it", - "iglesias-carbonia.it", - "iglesiascarbonia.it", - "im.it", - "imperia.it", - "is.it", - "isernia.it", - "kr.it", - "la-spezia.it", - "laquila.it", - "laspezia.it", - "latina.it", - "lc.it", - "le.it", - "lecce.it", - "lecco.it", - "li.it", - "livorno.it", - "lo.it", - "lodi.it", - "lt.it", - "lu.it", - "lucca.it", - "macerata.it", - "mantova.it", - "massa-carrara.it", - "massacarrara.it", - "matera.it", - "mb.it", - "mc.it", - "me.it", - "medio-campidano.it", - "mediocampidano.it", - "messina.it", - "mi.it", - "milan.it", - "milano.it", - "mn.it", - "mo.it", - "modena.it", - "monza-brianza.it", - "monza-e-della-brianza.it", - "monza.it", - "monzabrianza.it", - "monzaebrianza.it", - "monzaedellabrianza.it", - "ms.it", - "mt.it", - "na.it", - "naples.it", - "napoli.it", - "no.it", - "novara.it", - "nu.it", - "nuoro.it", - "og.it", - "ogliastra.it", - "olbia-tempio.it", - "olbiatempio.it", - "or.it", - "oristano.it", - "ot.it", - "pa.it", - "padova.it", - "padua.it", - "palermo.it", - "parma.it", - "pavia.it", - "pc.it", - "pd.it", - "pe.it", - "perugia.it", - "pesaro-urbino.it", - "pesarourbino.it", - "pescara.it", - "pg.it", - "pi.it", - "piacenza.it", - "pisa.it", - "pistoia.it", - "pn.it", - "po.it", - "pordenone.it", - "potenza.it", - "pr.it", - "prato.it", - "pt.it", - "pu.it", - "pv.it", - "pz.it", - "ra.it", - "ragusa.it", - "ravenna.it", - "rc.it", - "re.it", - "reggio-calabria.it", - "reggio-emilia.it", - "reggiocalabria.it", - "reggioemilia.it", - "rg.it", - "ri.it", - "rieti.it", - "rimini.it", - "rm.it", - "rn.it", - "ro.it", - "roma.it", - "rome.it", - "rovigo.it", - "sa.it", - "salerno.it", - "sassari.it", - "savona.it", - "si.it", - "siena.it", - "siracusa.it", - "so.it", - "sondrio.it", - "sp.it", - "sr.it", - "ss.it", - "suedtirol.it", - "sv.it", - "ta.it", - "taranto.it", - "te.it", - "tempio-olbia.it", - "tempioolbia.it", - "teramo.it", - "terni.it", - "tn.it", - "to.it", - "torino.it", - "tp.it", - "tr.it", - "trani-andria-barletta.it", - "trani-barletta-andria.it", - "traniandriabarletta.it", - "tranibarlettaandria.it", - "trapani.it", - "trentino.it", - "trento.it", - "treviso.it", - "trieste.it", - "ts.it", - "turin.it", - "tv.it", - "ud.it", - "udine.it", - "urbino-pesaro.it", - "urbinopesaro.it", - 
"va.it", - "varese.it", - "vb.it", - "vc.it", - "ve.it", - "venezia.it", - "venice.it", - "verbania.it", - "vercelli.it", - "verona.it", - "vi.it", - "vibo-valentia.it", - "vibovalentia.it", - "vicenza.it", - "viterbo.it", - "vr.it", - "vs.it", - "vt.it", - "vv.it", - "je", - "co.je", - "net.je", - "org.je", - "*.jm", - "jo", - "com.jo", - "org.jo", - "net.jo", - "edu.jo", - "sch.jo", - "gov.jo", - "mil.jo", - "name.jo", - "jobs", - "jp", - "ac.jp", - "ad.jp", - "co.jp", - "ed.jp", - "go.jp", - "gr.jp", - "lg.jp", - "ne.jp", - "or.jp", - "aichi.jp", - "akita.jp", - "aomori.jp", - "chiba.jp", - "ehime.jp", - "fukui.jp", - "fukuoka.jp", - "fukushima.jp", - "gifu.jp", - "gunma.jp", - "hiroshima.jp", - "hokkaido.jp", - "hyogo.jp", - "ibaraki.jp", - "ishikawa.jp", - "iwate.jp", - "kagawa.jp", - "kagoshima.jp", - "kanagawa.jp", - "kochi.jp", - "kumamoto.jp", - "kyoto.jp", - "mie.jp", - "miyagi.jp", - "miyazaki.jp", - "nagano.jp", - "nagasaki.jp", - "nara.jp", - "niigata.jp", - "oita.jp", - "okayama.jp", - "okinawa.jp", - "osaka.jp", - "saga.jp", - "saitama.jp", - "shiga.jp", - "shimane.jp", - "shizuoka.jp", - "tochigi.jp", - "tokushima.jp", - "tokyo.jp", - "tottori.jp", - "toyama.jp", - "wakayama.jp", - "yamagata.jp", - "yamaguchi.jp", - "yamanashi.jp", - "xn--4pvxs.jp", - "xn--vgu402c.jp", - "xn--c3s14m.jp", - "xn--f6qx53a.jp", - "xn--8pvr4u.jp", - "xn--uist22h.jp", - "xn--djrs72d6uy.jp", - "xn--mkru45i.jp", - "xn--0trq7p7nn.jp", - "xn--8ltr62k.jp", - "xn--2m4a15e.jp", - "xn--efvn9s.jp", - "xn--32vp30h.jp", - "xn--4it797k.jp", - "xn--1lqs71d.jp", - "xn--5rtp49c.jp", - "xn--5js045d.jp", - "xn--ehqz56n.jp", - "xn--1lqs03n.jp", - "xn--qqqt11m.jp", - "xn--kbrq7o.jp", - "xn--pssu33l.jp", - "xn--ntsq17g.jp", - "xn--uisz3g.jp", - "xn--6btw5a.jp", - "xn--1ctwo.jp", - "xn--6orx2r.jp", - "xn--rht61e.jp", - "xn--rht27z.jp", - "xn--djty4k.jp", - "xn--nit225k.jp", - "xn--rht3d.jp", - "xn--klty5x.jp", - "xn--kltx9a.jp", - "xn--kltp7d.jp", - "xn--uuwu58a.jp", - "xn--zbx025d.jp", - "xn--ntso0iqx3a.jp", - "xn--elqq16h.jp", - "xn--4it168d.jp", - "xn--klt787d.jp", - "xn--rny31h.jp", - "xn--7t0a264c.jp", - "xn--5rtq34k.jp", - "xn--k7yn95e.jp", - "xn--tor131o.jp", - "xn--d5qv7z876c.jp", - "*.kawasaki.jp", - "*.kitakyushu.jp", - "*.kobe.jp", - "*.nagoya.jp", - "*.sapporo.jp", - "*.sendai.jp", - "*.yokohama.jp", - "!city.kawasaki.jp", - "!city.kitakyushu.jp", - "!city.kobe.jp", - "!city.nagoya.jp", - "!city.sapporo.jp", - "!city.sendai.jp", - "!city.yokohama.jp", - "aisai.aichi.jp", - "ama.aichi.jp", - "anjo.aichi.jp", - "asuke.aichi.jp", - "chiryu.aichi.jp", - "chita.aichi.jp", - "fuso.aichi.jp", - "gamagori.aichi.jp", - "handa.aichi.jp", - "hazu.aichi.jp", - "hekinan.aichi.jp", - "higashiura.aichi.jp", - "ichinomiya.aichi.jp", - "inazawa.aichi.jp", - "inuyama.aichi.jp", - "isshiki.aichi.jp", - "iwakura.aichi.jp", - "kanie.aichi.jp", - "kariya.aichi.jp", - "kasugai.aichi.jp", - "kira.aichi.jp", - "kiyosu.aichi.jp", - "komaki.aichi.jp", - "konan.aichi.jp", - "kota.aichi.jp", - "mihama.aichi.jp", - "miyoshi.aichi.jp", - "nishio.aichi.jp", - "nisshin.aichi.jp", - "obu.aichi.jp", - "oguchi.aichi.jp", - "oharu.aichi.jp", - "okazaki.aichi.jp", - "owariasahi.aichi.jp", - "seto.aichi.jp", - "shikatsu.aichi.jp", - "shinshiro.aichi.jp", - "shitara.aichi.jp", - "tahara.aichi.jp", - "takahama.aichi.jp", - "tobishima.aichi.jp", - "toei.aichi.jp", - "togo.aichi.jp", - "tokai.aichi.jp", - "tokoname.aichi.jp", - "toyoake.aichi.jp", - "toyohashi.aichi.jp", - "toyokawa.aichi.jp", - "toyone.aichi.jp", - "toyota.aichi.jp", - 
"tsushima.aichi.jp", - "yatomi.aichi.jp", - "akita.akita.jp", - "daisen.akita.jp", - "fujisato.akita.jp", - "gojome.akita.jp", - "hachirogata.akita.jp", - "happou.akita.jp", - "higashinaruse.akita.jp", - "honjo.akita.jp", - "honjyo.akita.jp", - "ikawa.akita.jp", - "kamikoani.akita.jp", - "kamioka.akita.jp", - "katagami.akita.jp", - "kazuno.akita.jp", - "kitaakita.akita.jp", - "kosaka.akita.jp", - "kyowa.akita.jp", - "misato.akita.jp", - "mitane.akita.jp", - "moriyoshi.akita.jp", - "nikaho.akita.jp", - "noshiro.akita.jp", - "odate.akita.jp", - "oga.akita.jp", - "ogata.akita.jp", - "semboku.akita.jp", - "yokote.akita.jp", - "yurihonjo.akita.jp", - "aomori.aomori.jp", - "gonohe.aomori.jp", - "hachinohe.aomori.jp", - "hashikami.aomori.jp", - "hiranai.aomori.jp", - "hirosaki.aomori.jp", - "itayanagi.aomori.jp", - "kuroishi.aomori.jp", - "misawa.aomori.jp", - "mutsu.aomori.jp", - "nakadomari.aomori.jp", - "noheji.aomori.jp", - "oirase.aomori.jp", - "owani.aomori.jp", - "rokunohe.aomori.jp", - "sannohe.aomori.jp", - "shichinohe.aomori.jp", - "shingo.aomori.jp", - "takko.aomori.jp", - "towada.aomori.jp", - "tsugaru.aomori.jp", - "tsuruta.aomori.jp", - "abiko.chiba.jp", - "asahi.chiba.jp", - "chonan.chiba.jp", - "chosei.chiba.jp", - "choshi.chiba.jp", - "chuo.chiba.jp", - "funabashi.chiba.jp", - "futtsu.chiba.jp", - "hanamigawa.chiba.jp", - "ichihara.chiba.jp", - "ichikawa.chiba.jp", - "ichinomiya.chiba.jp", - "inzai.chiba.jp", - "isumi.chiba.jp", - "kamagaya.chiba.jp", - "kamogawa.chiba.jp", - "kashiwa.chiba.jp", - "katori.chiba.jp", - "katsuura.chiba.jp", - "kimitsu.chiba.jp", - "kisarazu.chiba.jp", - "kozaki.chiba.jp", - "kujukuri.chiba.jp", - "kyonan.chiba.jp", - "matsudo.chiba.jp", - "midori.chiba.jp", - "mihama.chiba.jp", - "minamiboso.chiba.jp", - "mobara.chiba.jp", - "mutsuzawa.chiba.jp", - "nagara.chiba.jp", - "nagareyama.chiba.jp", - "narashino.chiba.jp", - "narita.chiba.jp", - "noda.chiba.jp", - "oamishirasato.chiba.jp", - "omigawa.chiba.jp", - "onjuku.chiba.jp", - "otaki.chiba.jp", - "sakae.chiba.jp", - "sakura.chiba.jp", - "shimofusa.chiba.jp", - "shirako.chiba.jp", - "shiroi.chiba.jp", - "shisui.chiba.jp", - "sodegaura.chiba.jp", - "sosa.chiba.jp", - "tako.chiba.jp", - "tateyama.chiba.jp", - "togane.chiba.jp", - "tohnosho.chiba.jp", - "tomisato.chiba.jp", - "urayasu.chiba.jp", - "yachimata.chiba.jp", - "yachiyo.chiba.jp", - "yokaichiba.chiba.jp", - "yokoshibahikari.chiba.jp", - "yotsukaido.chiba.jp", - "ainan.ehime.jp", - "honai.ehime.jp", - "ikata.ehime.jp", - "imabari.ehime.jp", - "iyo.ehime.jp", - "kamijima.ehime.jp", - "kihoku.ehime.jp", - "kumakogen.ehime.jp", - "masaki.ehime.jp", - "matsuno.ehime.jp", - "matsuyama.ehime.jp", - "namikata.ehime.jp", - "niihama.ehime.jp", - "ozu.ehime.jp", - "saijo.ehime.jp", - "seiyo.ehime.jp", - "shikokuchuo.ehime.jp", - "tobe.ehime.jp", - "toon.ehime.jp", - "uchiko.ehime.jp", - "uwajima.ehime.jp", - "yawatahama.ehime.jp", - "echizen.fukui.jp", - "eiheiji.fukui.jp", - "fukui.fukui.jp", - "ikeda.fukui.jp", - "katsuyama.fukui.jp", - "mihama.fukui.jp", - "minamiechizen.fukui.jp", - "obama.fukui.jp", - "ohi.fukui.jp", - "ono.fukui.jp", - "sabae.fukui.jp", - "sakai.fukui.jp", - "takahama.fukui.jp", - "tsuruga.fukui.jp", - "wakasa.fukui.jp", - "ashiya.fukuoka.jp", - "buzen.fukuoka.jp", - "chikugo.fukuoka.jp", - "chikuho.fukuoka.jp", - "chikujo.fukuoka.jp", - "chikushino.fukuoka.jp", - "chikuzen.fukuoka.jp", - "chuo.fukuoka.jp", - "dazaifu.fukuoka.jp", - "fukuchi.fukuoka.jp", - "hakata.fukuoka.jp", - "higashi.fukuoka.jp", - "hirokawa.fukuoka.jp", - 
"hisayama.fukuoka.jp", - "iizuka.fukuoka.jp", - "inatsuki.fukuoka.jp", - "kaho.fukuoka.jp", - "kasuga.fukuoka.jp", - "kasuya.fukuoka.jp", - "kawara.fukuoka.jp", - "keisen.fukuoka.jp", - "koga.fukuoka.jp", - "kurate.fukuoka.jp", - "kurogi.fukuoka.jp", - "kurume.fukuoka.jp", - "minami.fukuoka.jp", - "miyako.fukuoka.jp", - "miyama.fukuoka.jp", - "miyawaka.fukuoka.jp", - "mizumaki.fukuoka.jp", - "munakata.fukuoka.jp", - "nakagawa.fukuoka.jp", - "nakama.fukuoka.jp", - "nishi.fukuoka.jp", - "nogata.fukuoka.jp", - "ogori.fukuoka.jp", - "okagaki.fukuoka.jp", - "okawa.fukuoka.jp", - "oki.fukuoka.jp", - "omuta.fukuoka.jp", - "onga.fukuoka.jp", - "onojo.fukuoka.jp", - "oto.fukuoka.jp", - "saigawa.fukuoka.jp", - "sasaguri.fukuoka.jp", - "shingu.fukuoka.jp", - "shinyoshitomi.fukuoka.jp", - "shonai.fukuoka.jp", - "soeda.fukuoka.jp", - "sue.fukuoka.jp", - "tachiarai.fukuoka.jp", - "tagawa.fukuoka.jp", - "takata.fukuoka.jp", - "toho.fukuoka.jp", - "toyotsu.fukuoka.jp", - "tsuiki.fukuoka.jp", - "ukiha.fukuoka.jp", - "umi.fukuoka.jp", - "usui.fukuoka.jp", - "yamada.fukuoka.jp", - "yame.fukuoka.jp", - "yanagawa.fukuoka.jp", - "yukuhashi.fukuoka.jp", - "aizubange.fukushima.jp", - "aizumisato.fukushima.jp", - "aizuwakamatsu.fukushima.jp", - "asakawa.fukushima.jp", - "bandai.fukushima.jp", - "date.fukushima.jp", - "fukushima.fukushima.jp", - "furudono.fukushima.jp", - "futaba.fukushima.jp", - "hanawa.fukushima.jp", - "higashi.fukushima.jp", - "hirata.fukushima.jp", - "hirono.fukushima.jp", - "iitate.fukushima.jp", - "inawashiro.fukushima.jp", - "ishikawa.fukushima.jp", - "iwaki.fukushima.jp", - "izumizaki.fukushima.jp", - "kagamiishi.fukushima.jp", - "kaneyama.fukushima.jp", - "kawamata.fukushima.jp", - "kitakata.fukushima.jp", - "kitashiobara.fukushima.jp", - "koori.fukushima.jp", - "koriyama.fukushima.jp", - "kunimi.fukushima.jp", - "miharu.fukushima.jp", - "mishima.fukushima.jp", - "namie.fukushima.jp", - "nango.fukushima.jp", - "nishiaizu.fukushima.jp", - "nishigo.fukushima.jp", - "okuma.fukushima.jp", - "omotego.fukushima.jp", - "ono.fukushima.jp", - "otama.fukushima.jp", - "samegawa.fukushima.jp", - "shimogo.fukushima.jp", - "shirakawa.fukushima.jp", - "showa.fukushima.jp", - "soma.fukushima.jp", - "sukagawa.fukushima.jp", - "taishin.fukushima.jp", - "tamakawa.fukushima.jp", - "tanagura.fukushima.jp", - "tenei.fukushima.jp", - "yabuki.fukushima.jp", - "yamato.fukushima.jp", - "yamatsuri.fukushima.jp", - "yanaizu.fukushima.jp", - "yugawa.fukushima.jp", - "anpachi.gifu.jp", - "ena.gifu.jp", - "gifu.gifu.jp", - "ginan.gifu.jp", - "godo.gifu.jp", - "gujo.gifu.jp", - "hashima.gifu.jp", - "hichiso.gifu.jp", - "hida.gifu.jp", - "higashishirakawa.gifu.jp", - "ibigawa.gifu.jp", - "ikeda.gifu.jp", - "kakamigahara.gifu.jp", - "kani.gifu.jp", - "kasahara.gifu.jp", - "kasamatsu.gifu.jp", - "kawaue.gifu.jp", - "kitagata.gifu.jp", - "mino.gifu.jp", - "minokamo.gifu.jp", - "mitake.gifu.jp", - "mizunami.gifu.jp", - "motosu.gifu.jp", - "nakatsugawa.gifu.jp", - "ogaki.gifu.jp", - "sakahogi.gifu.jp", - "seki.gifu.jp", - "sekigahara.gifu.jp", - "shirakawa.gifu.jp", - "tajimi.gifu.jp", - "takayama.gifu.jp", - "tarui.gifu.jp", - "toki.gifu.jp", - "tomika.gifu.jp", - "wanouchi.gifu.jp", - "yamagata.gifu.jp", - "yaotsu.gifu.jp", - "yoro.gifu.jp", - "annaka.gunma.jp", - "chiyoda.gunma.jp", - "fujioka.gunma.jp", - "higashiagatsuma.gunma.jp", - "isesaki.gunma.jp", - "itakura.gunma.jp", - "kanna.gunma.jp", - "kanra.gunma.jp", - "katashina.gunma.jp", - "kawaba.gunma.jp", - "kiryu.gunma.jp", - "kusatsu.gunma.jp", - "maebashi.gunma.jp", 
- "meiwa.gunma.jp", - "midori.gunma.jp", - "minakami.gunma.jp", - "naganohara.gunma.jp", - "nakanojo.gunma.jp", - "nanmoku.gunma.jp", - "numata.gunma.jp", - "oizumi.gunma.jp", - "ora.gunma.jp", - "ota.gunma.jp", - "shibukawa.gunma.jp", - "shimonita.gunma.jp", - "shinto.gunma.jp", - "showa.gunma.jp", - "takasaki.gunma.jp", - "takayama.gunma.jp", - "tamamura.gunma.jp", - "tatebayashi.gunma.jp", - "tomioka.gunma.jp", - "tsukiyono.gunma.jp", - "tsumagoi.gunma.jp", - "ueno.gunma.jp", - "yoshioka.gunma.jp", - "asaminami.hiroshima.jp", - "daiwa.hiroshima.jp", - "etajima.hiroshima.jp", - "fuchu.hiroshima.jp", - "fukuyama.hiroshima.jp", - "hatsukaichi.hiroshima.jp", - "higashihiroshima.hiroshima.jp", - "hongo.hiroshima.jp", - "jinsekikogen.hiroshima.jp", - "kaita.hiroshima.jp", - "kui.hiroshima.jp", - "kumano.hiroshima.jp", - "kure.hiroshima.jp", - "mihara.hiroshima.jp", - "miyoshi.hiroshima.jp", - "naka.hiroshima.jp", - "onomichi.hiroshima.jp", - "osakikamijima.hiroshima.jp", - "otake.hiroshima.jp", - "saka.hiroshima.jp", - "sera.hiroshima.jp", - "seranishi.hiroshima.jp", - "shinichi.hiroshima.jp", - "shobara.hiroshima.jp", - "takehara.hiroshima.jp", - "abashiri.hokkaido.jp", - "abira.hokkaido.jp", - "aibetsu.hokkaido.jp", - "akabira.hokkaido.jp", - "akkeshi.hokkaido.jp", - "asahikawa.hokkaido.jp", - "ashibetsu.hokkaido.jp", - "ashoro.hokkaido.jp", - "assabu.hokkaido.jp", - "atsuma.hokkaido.jp", - "bibai.hokkaido.jp", - "biei.hokkaido.jp", - "bifuka.hokkaido.jp", - "bihoro.hokkaido.jp", - "biratori.hokkaido.jp", - "chippubetsu.hokkaido.jp", - "chitose.hokkaido.jp", - "date.hokkaido.jp", - "ebetsu.hokkaido.jp", - "embetsu.hokkaido.jp", - "eniwa.hokkaido.jp", - "erimo.hokkaido.jp", - "esan.hokkaido.jp", - "esashi.hokkaido.jp", - "fukagawa.hokkaido.jp", - "fukushima.hokkaido.jp", - "furano.hokkaido.jp", - "furubira.hokkaido.jp", - "haboro.hokkaido.jp", - "hakodate.hokkaido.jp", - "hamatonbetsu.hokkaido.jp", - "hidaka.hokkaido.jp", - "higashikagura.hokkaido.jp", - "higashikawa.hokkaido.jp", - "hiroo.hokkaido.jp", - "hokuryu.hokkaido.jp", - "hokuto.hokkaido.jp", - "honbetsu.hokkaido.jp", - "horokanai.hokkaido.jp", - "horonobe.hokkaido.jp", - "ikeda.hokkaido.jp", - "imakane.hokkaido.jp", - "ishikari.hokkaido.jp", - "iwamizawa.hokkaido.jp", - "iwanai.hokkaido.jp", - "kamifurano.hokkaido.jp", - "kamikawa.hokkaido.jp", - "kamishihoro.hokkaido.jp", - "kamisunagawa.hokkaido.jp", - "kamoenai.hokkaido.jp", - "kayabe.hokkaido.jp", - "kembuchi.hokkaido.jp", - "kikonai.hokkaido.jp", - "kimobetsu.hokkaido.jp", - "kitahiroshima.hokkaido.jp", - "kitami.hokkaido.jp", - "kiyosato.hokkaido.jp", - "koshimizu.hokkaido.jp", - "kunneppu.hokkaido.jp", - "kuriyama.hokkaido.jp", - "kuromatsunai.hokkaido.jp", - "kushiro.hokkaido.jp", - "kutchan.hokkaido.jp", - "kyowa.hokkaido.jp", - "mashike.hokkaido.jp", - "matsumae.hokkaido.jp", - "mikasa.hokkaido.jp", - "minamifurano.hokkaido.jp", - "mombetsu.hokkaido.jp", - "moseushi.hokkaido.jp", - "mukawa.hokkaido.jp", - "muroran.hokkaido.jp", - "naie.hokkaido.jp", - "nakagawa.hokkaido.jp", - "nakasatsunai.hokkaido.jp", - "nakatombetsu.hokkaido.jp", - "nanae.hokkaido.jp", - "nanporo.hokkaido.jp", - "nayoro.hokkaido.jp", - "nemuro.hokkaido.jp", - "niikappu.hokkaido.jp", - "niki.hokkaido.jp", - "nishiokoppe.hokkaido.jp", - "noboribetsu.hokkaido.jp", - "numata.hokkaido.jp", - "obihiro.hokkaido.jp", - "obira.hokkaido.jp", - "oketo.hokkaido.jp", - "okoppe.hokkaido.jp", - "otaru.hokkaido.jp", - "otobe.hokkaido.jp", - "otofuke.hokkaido.jp", - "otoineppu.hokkaido.jp", - "oumu.hokkaido.jp", - 
"ozora.hokkaido.jp", - "pippu.hokkaido.jp", - "rankoshi.hokkaido.jp", - "rebun.hokkaido.jp", - "rikubetsu.hokkaido.jp", - "rishiri.hokkaido.jp", - "rishirifuji.hokkaido.jp", - "saroma.hokkaido.jp", - "sarufutsu.hokkaido.jp", - "shakotan.hokkaido.jp", - "shari.hokkaido.jp", - "shibecha.hokkaido.jp", - "shibetsu.hokkaido.jp", - "shikabe.hokkaido.jp", - "shikaoi.hokkaido.jp", - "shimamaki.hokkaido.jp", - "shimizu.hokkaido.jp", - "shimokawa.hokkaido.jp", - "shinshinotsu.hokkaido.jp", - "shintoku.hokkaido.jp", - "shiranuka.hokkaido.jp", - "shiraoi.hokkaido.jp", - "shiriuchi.hokkaido.jp", - "sobetsu.hokkaido.jp", - "sunagawa.hokkaido.jp", - "taiki.hokkaido.jp", - "takasu.hokkaido.jp", - "takikawa.hokkaido.jp", - "takinoue.hokkaido.jp", - "teshikaga.hokkaido.jp", - "tobetsu.hokkaido.jp", - "tohma.hokkaido.jp", - "tomakomai.hokkaido.jp", - "tomari.hokkaido.jp", - "toya.hokkaido.jp", - "toyako.hokkaido.jp", - "toyotomi.hokkaido.jp", - "toyoura.hokkaido.jp", - "tsubetsu.hokkaido.jp", - "tsukigata.hokkaido.jp", - "urakawa.hokkaido.jp", - "urausu.hokkaido.jp", - "uryu.hokkaido.jp", - "utashinai.hokkaido.jp", - "wakkanai.hokkaido.jp", - "wassamu.hokkaido.jp", - "yakumo.hokkaido.jp", - "yoichi.hokkaido.jp", - "aioi.hyogo.jp", - "akashi.hyogo.jp", - "ako.hyogo.jp", - "amagasaki.hyogo.jp", - "aogaki.hyogo.jp", - "asago.hyogo.jp", - "ashiya.hyogo.jp", - "awaji.hyogo.jp", - "fukusaki.hyogo.jp", - "goshiki.hyogo.jp", - "harima.hyogo.jp", - "himeji.hyogo.jp", - "ichikawa.hyogo.jp", - "inagawa.hyogo.jp", - "itami.hyogo.jp", - "kakogawa.hyogo.jp", - "kamigori.hyogo.jp", - "kamikawa.hyogo.jp", - "kasai.hyogo.jp", - "kasuga.hyogo.jp", - "kawanishi.hyogo.jp", - "miki.hyogo.jp", - "minamiawaji.hyogo.jp", - "nishinomiya.hyogo.jp", - "nishiwaki.hyogo.jp", - "ono.hyogo.jp", - "sanda.hyogo.jp", - "sannan.hyogo.jp", - "sasayama.hyogo.jp", - "sayo.hyogo.jp", - "shingu.hyogo.jp", - "shinonsen.hyogo.jp", - "shiso.hyogo.jp", - "sumoto.hyogo.jp", - "taishi.hyogo.jp", - "taka.hyogo.jp", - "takarazuka.hyogo.jp", - "takasago.hyogo.jp", - "takino.hyogo.jp", - "tamba.hyogo.jp", - "tatsuno.hyogo.jp", - "toyooka.hyogo.jp", - "yabu.hyogo.jp", - "yashiro.hyogo.jp", - "yoka.hyogo.jp", - "yokawa.hyogo.jp", - "ami.ibaraki.jp", - "asahi.ibaraki.jp", - "bando.ibaraki.jp", - "chikusei.ibaraki.jp", - "daigo.ibaraki.jp", - "fujishiro.ibaraki.jp", - "hitachi.ibaraki.jp", - "hitachinaka.ibaraki.jp", - "hitachiomiya.ibaraki.jp", - "hitachiota.ibaraki.jp", - "ibaraki.ibaraki.jp", - "ina.ibaraki.jp", - "inashiki.ibaraki.jp", - "itako.ibaraki.jp", - "iwama.ibaraki.jp", - "joso.ibaraki.jp", - "kamisu.ibaraki.jp", - "kasama.ibaraki.jp", - "kashima.ibaraki.jp", - "kasumigaura.ibaraki.jp", - "koga.ibaraki.jp", - "miho.ibaraki.jp", - "mito.ibaraki.jp", - "moriya.ibaraki.jp", - "naka.ibaraki.jp", - "namegata.ibaraki.jp", - "oarai.ibaraki.jp", - "ogawa.ibaraki.jp", - "omitama.ibaraki.jp", - "ryugasaki.ibaraki.jp", - "sakai.ibaraki.jp", - "sakuragawa.ibaraki.jp", - "shimodate.ibaraki.jp", - "shimotsuma.ibaraki.jp", - "shirosato.ibaraki.jp", - "sowa.ibaraki.jp", - "suifu.ibaraki.jp", - "takahagi.ibaraki.jp", - "tamatsukuri.ibaraki.jp", - "tokai.ibaraki.jp", - "tomobe.ibaraki.jp", - "tone.ibaraki.jp", - "toride.ibaraki.jp", - "tsuchiura.ibaraki.jp", - "tsukuba.ibaraki.jp", - "uchihara.ibaraki.jp", - "ushiku.ibaraki.jp", - "yachiyo.ibaraki.jp", - "yamagata.ibaraki.jp", - "yawara.ibaraki.jp", - "yuki.ibaraki.jp", - "anamizu.ishikawa.jp", - "hakui.ishikawa.jp", - "hakusan.ishikawa.jp", - "kaga.ishikawa.jp", - "kahoku.ishikawa.jp", - "kanazawa.ishikawa.jp", - 
"kawakita.ishikawa.jp", - "komatsu.ishikawa.jp", - "nakanoto.ishikawa.jp", - "nanao.ishikawa.jp", - "nomi.ishikawa.jp", - "nonoichi.ishikawa.jp", - "noto.ishikawa.jp", - "shika.ishikawa.jp", - "suzu.ishikawa.jp", - "tsubata.ishikawa.jp", - "tsurugi.ishikawa.jp", - "uchinada.ishikawa.jp", - "wajima.ishikawa.jp", - "fudai.iwate.jp", - "fujisawa.iwate.jp", - "hanamaki.iwate.jp", - "hiraizumi.iwate.jp", - "hirono.iwate.jp", - "ichinohe.iwate.jp", - "ichinoseki.iwate.jp", - "iwaizumi.iwate.jp", - "iwate.iwate.jp", - "joboji.iwate.jp", - "kamaishi.iwate.jp", - "kanegasaki.iwate.jp", - "karumai.iwate.jp", - "kawai.iwate.jp", - "kitakami.iwate.jp", - "kuji.iwate.jp", - "kunohe.iwate.jp", - "kuzumaki.iwate.jp", - "miyako.iwate.jp", - "mizusawa.iwate.jp", - "morioka.iwate.jp", - "ninohe.iwate.jp", - "noda.iwate.jp", - "ofunato.iwate.jp", - "oshu.iwate.jp", - "otsuchi.iwate.jp", - "rikuzentakata.iwate.jp", - "shiwa.iwate.jp", - "shizukuishi.iwate.jp", - "sumita.iwate.jp", - "tanohata.iwate.jp", - "tono.iwate.jp", - "yahaba.iwate.jp", - "yamada.iwate.jp", - "ayagawa.kagawa.jp", - "higashikagawa.kagawa.jp", - "kanonji.kagawa.jp", - "kotohira.kagawa.jp", - "manno.kagawa.jp", - "marugame.kagawa.jp", - "mitoyo.kagawa.jp", - "naoshima.kagawa.jp", - "sanuki.kagawa.jp", - "tadotsu.kagawa.jp", - "takamatsu.kagawa.jp", - "tonosho.kagawa.jp", - "uchinomi.kagawa.jp", - "utazu.kagawa.jp", - "zentsuji.kagawa.jp", - "akune.kagoshima.jp", - "amami.kagoshima.jp", - "hioki.kagoshima.jp", - "isa.kagoshima.jp", - "isen.kagoshima.jp", - "izumi.kagoshima.jp", - "kagoshima.kagoshima.jp", - "kanoya.kagoshima.jp", - "kawanabe.kagoshima.jp", - "kinko.kagoshima.jp", - "kouyama.kagoshima.jp", - "makurazaki.kagoshima.jp", - "matsumoto.kagoshima.jp", - "minamitane.kagoshima.jp", - "nakatane.kagoshima.jp", - "nishinoomote.kagoshima.jp", - "satsumasendai.kagoshima.jp", - "soo.kagoshima.jp", - "tarumizu.kagoshima.jp", - "yusui.kagoshima.jp", - "aikawa.kanagawa.jp", - "atsugi.kanagawa.jp", - "ayase.kanagawa.jp", - "chigasaki.kanagawa.jp", - "ebina.kanagawa.jp", - "fujisawa.kanagawa.jp", - "hadano.kanagawa.jp", - "hakone.kanagawa.jp", - "hiratsuka.kanagawa.jp", - "isehara.kanagawa.jp", - "kaisei.kanagawa.jp", - "kamakura.kanagawa.jp", - "kiyokawa.kanagawa.jp", - "matsuda.kanagawa.jp", - "minamiashigara.kanagawa.jp", - "miura.kanagawa.jp", - "nakai.kanagawa.jp", - "ninomiya.kanagawa.jp", - "odawara.kanagawa.jp", - "oi.kanagawa.jp", - "oiso.kanagawa.jp", - "sagamihara.kanagawa.jp", - "samukawa.kanagawa.jp", - "tsukui.kanagawa.jp", - "yamakita.kanagawa.jp", - "yamato.kanagawa.jp", - "yokosuka.kanagawa.jp", - "yugawara.kanagawa.jp", - "zama.kanagawa.jp", - "zushi.kanagawa.jp", - "aki.kochi.jp", - "geisei.kochi.jp", - "hidaka.kochi.jp", - "higashitsuno.kochi.jp", - "ino.kochi.jp", - "kagami.kochi.jp", - "kami.kochi.jp", - "kitagawa.kochi.jp", - "kochi.kochi.jp", - "mihara.kochi.jp", - "motoyama.kochi.jp", - "muroto.kochi.jp", - "nahari.kochi.jp", - "nakamura.kochi.jp", - "nankoku.kochi.jp", - "nishitosa.kochi.jp", - "niyodogawa.kochi.jp", - "ochi.kochi.jp", - "okawa.kochi.jp", - "otoyo.kochi.jp", - "otsuki.kochi.jp", - "sakawa.kochi.jp", - "sukumo.kochi.jp", - "susaki.kochi.jp", - "tosa.kochi.jp", - "tosashimizu.kochi.jp", - "toyo.kochi.jp", - "tsuno.kochi.jp", - "umaji.kochi.jp", - "yasuda.kochi.jp", - "yusuhara.kochi.jp", - "amakusa.kumamoto.jp", - "arao.kumamoto.jp", - "aso.kumamoto.jp", - "choyo.kumamoto.jp", - "gyokuto.kumamoto.jp", - "kamiamakusa.kumamoto.jp", - "kikuchi.kumamoto.jp", - "kumamoto.kumamoto.jp", - 
"mashiki.kumamoto.jp", - "mifune.kumamoto.jp", - "minamata.kumamoto.jp", - "minamioguni.kumamoto.jp", - "nagasu.kumamoto.jp", - "nishihara.kumamoto.jp", - "oguni.kumamoto.jp", - "ozu.kumamoto.jp", - "sumoto.kumamoto.jp", - "takamori.kumamoto.jp", - "uki.kumamoto.jp", - "uto.kumamoto.jp", - "yamaga.kumamoto.jp", - "yamato.kumamoto.jp", - "yatsushiro.kumamoto.jp", - "ayabe.kyoto.jp", - "fukuchiyama.kyoto.jp", - "higashiyama.kyoto.jp", - "ide.kyoto.jp", - "ine.kyoto.jp", - "joyo.kyoto.jp", - "kameoka.kyoto.jp", - "kamo.kyoto.jp", - "kita.kyoto.jp", - "kizu.kyoto.jp", - "kumiyama.kyoto.jp", - "kyotamba.kyoto.jp", - "kyotanabe.kyoto.jp", - "kyotango.kyoto.jp", - "maizuru.kyoto.jp", - "minami.kyoto.jp", - "minamiyamashiro.kyoto.jp", - "miyazu.kyoto.jp", - "muko.kyoto.jp", - "nagaokakyo.kyoto.jp", - "nakagyo.kyoto.jp", - "nantan.kyoto.jp", - "oyamazaki.kyoto.jp", - "sakyo.kyoto.jp", - "seika.kyoto.jp", - "tanabe.kyoto.jp", - "uji.kyoto.jp", - "ujitawara.kyoto.jp", - "wazuka.kyoto.jp", - "yamashina.kyoto.jp", - "yawata.kyoto.jp", - "asahi.mie.jp", - "inabe.mie.jp", - "ise.mie.jp", - "kameyama.mie.jp", - "kawagoe.mie.jp", - "kiho.mie.jp", - "kisosaki.mie.jp", - "kiwa.mie.jp", - "komono.mie.jp", - "kumano.mie.jp", - "kuwana.mie.jp", - "matsusaka.mie.jp", - "meiwa.mie.jp", - "mihama.mie.jp", - "minamiise.mie.jp", - "misugi.mie.jp", - "miyama.mie.jp", - "nabari.mie.jp", - "shima.mie.jp", - "suzuka.mie.jp", - "tado.mie.jp", - "taiki.mie.jp", - "taki.mie.jp", - "tamaki.mie.jp", - "toba.mie.jp", - "tsu.mie.jp", - "udono.mie.jp", - "ureshino.mie.jp", - "watarai.mie.jp", - "yokkaichi.mie.jp", - "furukawa.miyagi.jp", - "higashimatsushima.miyagi.jp", - "ishinomaki.miyagi.jp", - "iwanuma.miyagi.jp", - "kakuda.miyagi.jp", - "kami.miyagi.jp", - "kawasaki.miyagi.jp", - "marumori.miyagi.jp", - "matsushima.miyagi.jp", - "minamisanriku.miyagi.jp", - "misato.miyagi.jp", - "murata.miyagi.jp", - "natori.miyagi.jp", - "ogawara.miyagi.jp", - "ohira.miyagi.jp", - "onagawa.miyagi.jp", - "osaki.miyagi.jp", - "rifu.miyagi.jp", - "semine.miyagi.jp", - "shibata.miyagi.jp", - "shichikashuku.miyagi.jp", - "shikama.miyagi.jp", - "shiogama.miyagi.jp", - "shiroishi.miyagi.jp", - "tagajo.miyagi.jp", - "taiwa.miyagi.jp", - "tome.miyagi.jp", - "tomiya.miyagi.jp", - "wakuya.miyagi.jp", - "watari.miyagi.jp", - "yamamoto.miyagi.jp", - "zao.miyagi.jp", - "aya.miyazaki.jp", - "ebino.miyazaki.jp", - "gokase.miyazaki.jp", - "hyuga.miyazaki.jp", - "kadogawa.miyazaki.jp", - "kawaminami.miyazaki.jp", - "kijo.miyazaki.jp", - "kitagawa.miyazaki.jp", - "kitakata.miyazaki.jp", - "kitaura.miyazaki.jp", - "kobayashi.miyazaki.jp", - "kunitomi.miyazaki.jp", - "kushima.miyazaki.jp", - "mimata.miyazaki.jp", - "miyakonojo.miyazaki.jp", - "miyazaki.miyazaki.jp", - "morotsuka.miyazaki.jp", - "nichinan.miyazaki.jp", - "nishimera.miyazaki.jp", - "nobeoka.miyazaki.jp", - "saito.miyazaki.jp", - "shiiba.miyazaki.jp", - "shintomi.miyazaki.jp", - "takaharu.miyazaki.jp", - "takanabe.miyazaki.jp", - "takazaki.miyazaki.jp", - "tsuno.miyazaki.jp", - "achi.nagano.jp", - "agematsu.nagano.jp", - "anan.nagano.jp", - "aoki.nagano.jp", - "asahi.nagano.jp", - "azumino.nagano.jp", - "chikuhoku.nagano.jp", - "chikuma.nagano.jp", - "chino.nagano.jp", - "fujimi.nagano.jp", - "hakuba.nagano.jp", - "hara.nagano.jp", - "hiraya.nagano.jp", - "iida.nagano.jp", - "iijima.nagano.jp", - "iiyama.nagano.jp", - "iizuna.nagano.jp", - "ikeda.nagano.jp", - "ikusaka.nagano.jp", - "ina.nagano.jp", - "karuizawa.nagano.jp", - "kawakami.nagano.jp", - "kiso.nagano.jp", - "kisofukushima.nagano.jp", 
- "kitaaiki.nagano.jp", - "komagane.nagano.jp", - "komoro.nagano.jp", - "matsukawa.nagano.jp", - "matsumoto.nagano.jp", - "miasa.nagano.jp", - "minamiaiki.nagano.jp", - "minamimaki.nagano.jp", - "minamiminowa.nagano.jp", - "minowa.nagano.jp", - "miyada.nagano.jp", - "miyota.nagano.jp", - "mochizuki.nagano.jp", - "nagano.nagano.jp", - "nagawa.nagano.jp", - "nagiso.nagano.jp", - "nakagawa.nagano.jp", - "nakano.nagano.jp", - "nozawaonsen.nagano.jp", - "obuse.nagano.jp", - "ogawa.nagano.jp", - "okaya.nagano.jp", - "omachi.nagano.jp", - "omi.nagano.jp", - "ookuwa.nagano.jp", - "ooshika.nagano.jp", - "otaki.nagano.jp", - "otari.nagano.jp", - "sakae.nagano.jp", - "sakaki.nagano.jp", - "saku.nagano.jp", - "sakuho.nagano.jp", - "shimosuwa.nagano.jp", - "shinanomachi.nagano.jp", - "shiojiri.nagano.jp", - "suwa.nagano.jp", - "suzaka.nagano.jp", - "takagi.nagano.jp", - "takamori.nagano.jp", - "takayama.nagano.jp", - "tateshina.nagano.jp", - "tatsuno.nagano.jp", - "togakushi.nagano.jp", - "togura.nagano.jp", - "tomi.nagano.jp", - "ueda.nagano.jp", - "wada.nagano.jp", - "yamagata.nagano.jp", - "yamanouchi.nagano.jp", - "yasaka.nagano.jp", - "yasuoka.nagano.jp", - "chijiwa.nagasaki.jp", - "futsu.nagasaki.jp", - "goto.nagasaki.jp", - "hasami.nagasaki.jp", - "hirado.nagasaki.jp", - "iki.nagasaki.jp", - "isahaya.nagasaki.jp", - "kawatana.nagasaki.jp", - "kuchinotsu.nagasaki.jp", - "matsuura.nagasaki.jp", - "nagasaki.nagasaki.jp", - "obama.nagasaki.jp", - "omura.nagasaki.jp", - "oseto.nagasaki.jp", - "saikai.nagasaki.jp", - "sasebo.nagasaki.jp", - "seihi.nagasaki.jp", - "shimabara.nagasaki.jp", - "shinkamigoto.nagasaki.jp", - "togitsu.nagasaki.jp", - "tsushima.nagasaki.jp", - "unzen.nagasaki.jp", - "ando.nara.jp", - "gose.nara.jp", - "heguri.nara.jp", - "higashiyoshino.nara.jp", - "ikaruga.nara.jp", - "ikoma.nara.jp", - "kamikitayama.nara.jp", - "kanmaki.nara.jp", - "kashiba.nara.jp", - "kashihara.nara.jp", - "katsuragi.nara.jp", - "kawai.nara.jp", - "kawakami.nara.jp", - "kawanishi.nara.jp", - "koryo.nara.jp", - "kurotaki.nara.jp", - "mitsue.nara.jp", - "miyake.nara.jp", - "nara.nara.jp", - "nosegawa.nara.jp", - "oji.nara.jp", - "ouda.nara.jp", - "oyodo.nara.jp", - "sakurai.nara.jp", - "sango.nara.jp", - "shimoichi.nara.jp", - "shimokitayama.nara.jp", - "shinjo.nara.jp", - "soni.nara.jp", - "takatori.nara.jp", - "tawaramoto.nara.jp", - "tenkawa.nara.jp", - "tenri.nara.jp", - "uda.nara.jp", - "yamatokoriyama.nara.jp", - "yamatotakada.nara.jp", - "yamazoe.nara.jp", - "yoshino.nara.jp", - "aga.niigata.jp", - "agano.niigata.jp", - "gosen.niigata.jp", - "itoigawa.niigata.jp", - "izumozaki.niigata.jp", - "joetsu.niigata.jp", - "kamo.niigata.jp", - "kariwa.niigata.jp", - "kashiwazaki.niigata.jp", - "minamiuonuma.niigata.jp", - "mitsuke.niigata.jp", - "muika.niigata.jp", - "murakami.niigata.jp", - "myoko.niigata.jp", - "nagaoka.niigata.jp", - "niigata.niigata.jp", - "ojiya.niigata.jp", - "omi.niigata.jp", - "sado.niigata.jp", - "sanjo.niigata.jp", - "seiro.niigata.jp", - "seirou.niigata.jp", - "sekikawa.niigata.jp", - "shibata.niigata.jp", - "tagami.niigata.jp", - "tainai.niigata.jp", - "tochio.niigata.jp", - "tokamachi.niigata.jp", - "tsubame.niigata.jp", - "tsunan.niigata.jp", - "uonuma.niigata.jp", - "yahiko.niigata.jp", - "yoita.niigata.jp", - "yuzawa.niigata.jp", - "beppu.oita.jp", - "bungoono.oita.jp", - "bungotakada.oita.jp", - "hasama.oita.jp", - "hiji.oita.jp", - "himeshima.oita.jp", - "hita.oita.jp", - "kamitsue.oita.jp", - "kokonoe.oita.jp", - "kuju.oita.jp", - "kunisaki.oita.jp", - "kusu.oita.jp", - 
"oita.oita.jp", - "saiki.oita.jp", - "taketa.oita.jp", - "tsukumi.oita.jp", - "usa.oita.jp", - "usuki.oita.jp", - "yufu.oita.jp", - "akaiwa.okayama.jp", - "asakuchi.okayama.jp", - "bizen.okayama.jp", - "hayashima.okayama.jp", - "ibara.okayama.jp", - "kagamino.okayama.jp", - "kasaoka.okayama.jp", - "kibichuo.okayama.jp", - "kumenan.okayama.jp", - "kurashiki.okayama.jp", - "maniwa.okayama.jp", - "misaki.okayama.jp", - "nagi.okayama.jp", - "niimi.okayama.jp", - "nishiawakura.okayama.jp", - "okayama.okayama.jp", - "satosho.okayama.jp", - "setouchi.okayama.jp", - "shinjo.okayama.jp", - "shoo.okayama.jp", - "soja.okayama.jp", - "takahashi.okayama.jp", - "tamano.okayama.jp", - "tsuyama.okayama.jp", - "wake.okayama.jp", - "yakage.okayama.jp", - "aguni.okinawa.jp", - "ginowan.okinawa.jp", - "ginoza.okinawa.jp", - "gushikami.okinawa.jp", - "haebaru.okinawa.jp", - "higashi.okinawa.jp", - "hirara.okinawa.jp", - "iheya.okinawa.jp", - "ishigaki.okinawa.jp", - "ishikawa.okinawa.jp", - "itoman.okinawa.jp", - "izena.okinawa.jp", - "kadena.okinawa.jp", - "kin.okinawa.jp", - "kitadaito.okinawa.jp", - "kitanakagusuku.okinawa.jp", - "kumejima.okinawa.jp", - "kunigami.okinawa.jp", - "minamidaito.okinawa.jp", - "motobu.okinawa.jp", - "nago.okinawa.jp", - "naha.okinawa.jp", - "nakagusuku.okinawa.jp", - "nakijin.okinawa.jp", - "nanjo.okinawa.jp", - "nishihara.okinawa.jp", - "ogimi.okinawa.jp", - "okinawa.okinawa.jp", - "onna.okinawa.jp", - "shimoji.okinawa.jp", - "taketomi.okinawa.jp", - "tarama.okinawa.jp", - "tokashiki.okinawa.jp", - "tomigusuku.okinawa.jp", - "tonaki.okinawa.jp", - "urasoe.okinawa.jp", - "uruma.okinawa.jp", - "yaese.okinawa.jp", - "yomitan.okinawa.jp", - "yonabaru.okinawa.jp", - "yonaguni.okinawa.jp", - "zamami.okinawa.jp", - "abeno.osaka.jp", - "chihayaakasaka.osaka.jp", - "chuo.osaka.jp", - "daito.osaka.jp", - "fujiidera.osaka.jp", - "habikino.osaka.jp", - "hannan.osaka.jp", - "higashiosaka.osaka.jp", - "higashisumiyoshi.osaka.jp", - "higashiyodogawa.osaka.jp", - "hirakata.osaka.jp", - "ibaraki.osaka.jp", - "ikeda.osaka.jp", - "izumi.osaka.jp", - "izumiotsu.osaka.jp", - "izumisano.osaka.jp", - "kadoma.osaka.jp", - "kaizuka.osaka.jp", - "kanan.osaka.jp", - "kashiwara.osaka.jp", - "katano.osaka.jp", - "kawachinagano.osaka.jp", - "kishiwada.osaka.jp", - "kita.osaka.jp", - "kumatori.osaka.jp", - "matsubara.osaka.jp", - "minato.osaka.jp", - "minoh.osaka.jp", - "misaki.osaka.jp", - "moriguchi.osaka.jp", - "neyagawa.osaka.jp", - "nishi.osaka.jp", - "nose.osaka.jp", - "osakasayama.osaka.jp", - "sakai.osaka.jp", - "sayama.osaka.jp", - "sennan.osaka.jp", - "settsu.osaka.jp", - "shijonawate.osaka.jp", - "shimamoto.osaka.jp", - "suita.osaka.jp", - "tadaoka.osaka.jp", - "taishi.osaka.jp", - "tajiri.osaka.jp", - "takaishi.osaka.jp", - "takatsuki.osaka.jp", - "tondabayashi.osaka.jp", - "toyonaka.osaka.jp", - "toyono.osaka.jp", - "yao.osaka.jp", - "ariake.saga.jp", - "arita.saga.jp", - "fukudomi.saga.jp", - "genkai.saga.jp", - "hamatama.saga.jp", - "hizen.saga.jp", - "imari.saga.jp", - "kamimine.saga.jp", - "kanzaki.saga.jp", - "karatsu.saga.jp", - "kashima.saga.jp", - "kitagata.saga.jp", - "kitahata.saga.jp", - "kiyama.saga.jp", - "kouhoku.saga.jp", - "kyuragi.saga.jp", - "nishiarita.saga.jp", - "ogi.saga.jp", - "omachi.saga.jp", - "ouchi.saga.jp", - "saga.saga.jp", - "shiroishi.saga.jp", - "taku.saga.jp", - "tara.saga.jp", - "tosu.saga.jp", - "yoshinogari.saga.jp", - "arakawa.saitama.jp", - "asaka.saitama.jp", - "chichibu.saitama.jp", - "fujimi.saitama.jp", - "fujimino.saitama.jp", - "fukaya.saitama.jp", - 
"hanno.saitama.jp", - "hanyu.saitama.jp", - "hasuda.saitama.jp", - "hatogaya.saitama.jp", - "hatoyama.saitama.jp", - "hidaka.saitama.jp", - "higashichichibu.saitama.jp", - "higashimatsuyama.saitama.jp", - "honjo.saitama.jp", - "ina.saitama.jp", - "iruma.saitama.jp", - "iwatsuki.saitama.jp", - "kamiizumi.saitama.jp", - "kamikawa.saitama.jp", - "kamisato.saitama.jp", - "kasukabe.saitama.jp", - "kawagoe.saitama.jp", - "kawaguchi.saitama.jp", - "kawajima.saitama.jp", - "kazo.saitama.jp", - "kitamoto.saitama.jp", - "koshigaya.saitama.jp", - "kounosu.saitama.jp", - "kuki.saitama.jp", - "kumagaya.saitama.jp", - "matsubushi.saitama.jp", - "minano.saitama.jp", - "misato.saitama.jp", - "miyashiro.saitama.jp", - "miyoshi.saitama.jp", - "moroyama.saitama.jp", - "nagatoro.saitama.jp", - "namegawa.saitama.jp", - "niiza.saitama.jp", - "ogano.saitama.jp", - "ogawa.saitama.jp", - "ogose.saitama.jp", - "okegawa.saitama.jp", - "omiya.saitama.jp", - "otaki.saitama.jp", - "ranzan.saitama.jp", - "ryokami.saitama.jp", - "saitama.saitama.jp", - "sakado.saitama.jp", - "satte.saitama.jp", - "sayama.saitama.jp", - "shiki.saitama.jp", - "shiraoka.saitama.jp", - "soka.saitama.jp", - "sugito.saitama.jp", - "toda.saitama.jp", - "tokigawa.saitama.jp", - "tokorozawa.saitama.jp", - "tsurugashima.saitama.jp", - "urawa.saitama.jp", - "warabi.saitama.jp", - "yashio.saitama.jp", - "yokoze.saitama.jp", - "yono.saitama.jp", - "yorii.saitama.jp", - "yoshida.saitama.jp", - "yoshikawa.saitama.jp", - "yoshimi.saitama.jp", - "aisho.shiga.jp", - "gamo.shiga.jp", - "higashiomi.shiga.jp", - "hikone.shiga.jp", - "koka.shiga.jp", - "konan.shiga.jp", - "kosei.shiga.jp", - "koto.shiga.jp", - "kusatsu.shiga.jp", - "maibara.shiga.jp", - "moriyama.shiga.jp", - "nagahama.shiga.jp", - "nishiazai.shiga.jp", - "notogawa.shiga.jp", - "omihachiman.shiga.jp", - "otsu.shiga.jp", - "ritto.shiga.jp", - "ryuoh.shiga.jp", - "takashima.shiga.jp", - "takatsuki.shiga.jp", - "torahime.shiga.jp", - "toyosato.shiga.jp", - "yasu.shiga.jp", - "akagi.shimane.jp", - "ama.shimane.jp", - "gotsu.shimane.jp", - "hamada.shimane.jp", - "higashiizumo.shimane.jp", - "hikawa.shimane.jp", - "hikimi.shimane.jp", - "izumo.shimane.jp", - "kakinoki.shimane.jp", - "masuda.shimane.jp", - "matsue.shimane.jp", - "misato.shimane.jp", - "nishinoshima.shimane.jp", - "ohda.shimane.jp", - "okinoshima.shimane.jp", - "okuizumo.shimane.jp", - "shimane.shimane.jp", - "tamayu.shimane.jp", - "tsuwano.shimane.jp", - "unnan.shimane.jp", - "yakumo.shimane.jp", - "yasugi.shimane.jp", - "yatsuka.shimane.jp", - "arai.shizuoka.jp", - "atami.shizuoka.jp", - "fuji.shizuoka.jp", - "fujieda.shizuoka.jp", - "fujikawa.shizuoka.jp", - "fujinomiya.shizuoka.jp", - "fukuroi.shizuoka.jp", - "gotemba.shizuoka.jp", - "haibara.shizuoka.jp", - "hamamatsu.shizuoka.jp", - "higashiizu.shizuoka.jp", - "ito.shizuoka.jp", - "iwata.shizuoka.jp", - "izu.shizuoka.jp", - "izunokuni.shizuoka.jp", - "kakegawa.shizuoka.jp", - "kannami.shizuoka.jp", - "kawanehon.shizuoka.jp", - "kawazu.shizuoka.jp", - "kikugawa.shizuoka.jp", - "kosai.shizuoka.jp", - "makinohara.shizuoka.jp", - "matsuzaki.shizuoka.jp", - "minamiizu.shizuoka.jp", - "mishima.shizuoka.jp", - "morimachi.shizuoka.jp", - "nishiizu.shizuoka.jp", - "numazu.shizuoka.jp", - "omaezaki.shizuoka.jp", - "shimada.shizuoka.jp", - "shimizu.shizuoka.jp", - "shimoda.shizuoka.jp", - "shizuoka.shizuoka.jp", - "susono.shizuoka.jp", - "yaizu.shizuoka.jp", - "yoshida.shizuoka.jp", - "ashikaga.tochigi.jp", - "bato.tochigi.jp", - "haga.tochigi.jp", - "ichikai.tochigi.jp", - 
"iwafune.tochigi.jp", - "kaminokawa.tochigi.jp", - "kanuma.tochigi.jp", - "karasuyama.tochigi.jp", - "kuroiso.tochigi.jp", - "mashiko.tochigi.jp", - "mibu.tochigi.jp", - "moka.tochigi.jp", - "motegi.tochigi.jp", - "nasu.tochigi.jp", - "nasushiobara.tochigi.jp", - "nikko.tochigi.jp", - "nishikata.tochigi.jp", - "nogi.tochigi.jp", - "ohira.tochigi.jp", - "ohtawara.tochigi.jp", - "oyama.tochigi.jp", - "sakura.tochigi.jp", - "sano.tochigi.jp", - "shimotsuke.tochigi.jp", - "shioya.tochigi.jp", - "takanezawa.tochigi.jp", - "tochigi.tochigi.jp", - "tsuga.tochigi.jp", - "ujiie.tochigi.jp", - "utsunomiya.tochigi.jp", - "yaita.tochigi.jp", - "aizumi.tokushima.jp", - "anan.tokushima.jp", - "ichiba.tokushima.jp", - "itano.tokushima.jp", - "kainan.tokushima.jp", - "komatsushima.tokushima.jp", - "matsushige.tokushima.jp", - "mima.tokushima.jp", - "minami.tokushima.jp", - "miyoshi.tokushima.jp", - "mugi.tokushima.jp", - "nakagawa.tokushima.jp", - "naruto.tokushima.jp", - "sanagochi.tokushima.jp", - "shishikui.tokushima.jp", - "tokushima.tokushima.jp", - "wajiki.tokushima.jp", - "adachi.tokyo.jp", - "akiruno.tokyo.jp", - "akishima.tokyo.jp", - "aogashima.tokyo.jp", - "arakawa.tokyo.jp", - "bunkyo.tokyo.jp", - "chiyoda.tokyo.jp", - "chofu.tokyo.jp", - "chuo.tokyo.jp", - "edogawa.tokyo.jp", - "fuchu.tokyo.jp", - "fussa.tokyo.jp", - "hachijo.tokyo.jp", - "hachioji.tokyo.jp", - "hamura.tokyo.jp", - "higashikurume.tokyo.jp", - "higashimurayama.tokyo.jp", - "higashiyamato.tokyo.jp", - "hino.tokyo.jp", - "hinode.tokyo.jp", - "hinohara.tokyo.jp", - "inagi.tokyo.jp", - "itabashi.tokyo.jp", - "katsushika.tokyo.jp", - "kita.tokyo.jp", - "kiyose.tokyo.jp", - "kodaira.tokyo.jp", - "koganei.tokyo.jp", - "kokubunji.tokyo.jp", - "komae.tokyo.jp", - "koto.tokyo.jp", - "kouzushima.tokyo.jp", - "kunitachi.tokyo.jp", - "machida.tokyo.jp", - "meguro.tokyo.jp", - "minato.tokyo.jp", - "mitaka.tokyo.jp", - "mizuho.tokyo.jp", - "musashimurayama.tokyo.jp", - "musashino.tokyo.jp", - "nakano.tokyo.jp", - "nerima.tokyo.jp", - "ogasawara.tokyo.jp", - "okutama.tokyo.jp", - "ome.tokyo.jp", - "oshima.tokyo.jp", - "ota.tokyo.jp", - "setagaya.tokyo.jp", - "shibuya.tokyo.jp", - "shinagawa.tokyo.jp", - "shinjuku.tokyo.jp", - "suginami.tokyo.jp", - "sumida.tokyo.jp", - "tachikawa.tokyo.jp", - "taito.tokyo.jp", - "tama.tokyo.jp", - "toshima.tokyo.jp", - "chizu.tottori.jp", - "hino.tottori.jp", - "kawahara.tottori.jp", - "koge.tottori.jp", - "kotoura.tottori.jp", - "misasa.tottori.jp", - "nanbu.tottori.jp", - "nichinan.tottori.jp", - "sakaiminato.tottori.jp", - "tottori.tottori.jp", - "wakasa.tottori.jp", - "yazu.tottori.jp", - "yonago.tottori.jp", - "asahi.toyama.jp", - "fuchu.toyama.jp", - "fukumitsu.toyama.jp", - "funahashi.toyama.jp", - "himi.toyama.jp", - "imizu.toyama.jp", - "inami.toyama.jp", - "johana.toyama.jp", - "kamiichi.toyama.jp", - "kurobe.toyama.jp", - "nakaniikawa.toyama.jp", - "namerikawa.toyama.jp", - "nanto.toyama.jp", - "nyuzen.toyama.jp", - "oyabe.toyama.jp", - "taira.toyama.jp", - "takaoka.toyama.jp", - "tateyama.toyama.jp", - "toga.toyama.jp", - "tonami.toyama.jp", - "toyama.toyama.jp", - "unazuki.toyama.jp", - "uozu.toyama.jp", - "yamada.toyama.jp", - "arida.wakayama.jp", - "aridagawa.wakayama.jp", - "gobo.wakayama.jp", - "hashimoto.wakayama.jp", - "hidaka.wakayama.jp", - "hirogawa.wakayama.jp", - "inami.wakayama.jp", - "iwade.wakayama.jp", - "kainan.wakayama.jp", - "kamitonda.wakayama.jp", - "katsuragi.wakayama.jp", - "kimino.wakayama.jp", - "kinokawa.wakayama.jp", - "kitayama.wakayama.jp", - "koya.wakayama.jp", - 
"koza.wakayama.jp", - "kozagawa.wakayama.jp", - "kudoyama.wakayama.jp", - "kushimoto.wakayama.jp", - "mihama.wakayama.jp", - "misato.wakayama.jp", - "nachikatsuura.wakayama.jp", - "shingu.wakayama.jp", - "shirahama.wakayama.jp", - "taiji.wakayama.jp", - "tanabe.wakayama.jp", - "wakayama.wakayama.jp", - "yuasa.wakayama.jp", - "yura.wakayama.jp", - "asahi.yamagata.jp", - "funagata.yamagata.jp", - "higashine.yamagata.jp", - "iide.yamagata.jp", - "kahoku.yamagata.jp", - "kaminoyama.yamagata.jp", - "kaneyama.yamagata.jp", - "kawanishi.yamagata.jp", - "mamurogawa.yamagata.jp", - "mikawa.yamagata.jp", - "murayama.yamagata.jp", - "nagai.yamagata.jp", - "nakayama.yamagata.jp", - "nanyo.yamagata.jp", - "nishikawa.yamagata.jp", - "obanazawa.yamagata.jp", - "oe.yamagata.jp", - "oguni.yamagata.jp", - "ohkura.yamagata.jp", - "oishida.yamagata.jp", - "sagae.yamagata.jp", - "sakata.yamagata.jp", - "sakegawa.yamagata.jp", - "shinjo.yamagata.jp", - "shirataka.yamagata.jp", - "shonai.yamagata.jp", - "takahata.yamagata.jp", - "tendo.yamagata.jp", - "tozawa.yamagata.jp", - "tsuruoka.yamagata.jp", - "yamagata.yamagata.jp", - "yamanobe.yamagata.jp", - "yonezawa.yamagata.jp", - "yuza.yamagata.jp", - "abu.yamaguchi.jp", - "hagi.yamaguchi.jp", - "hikari.yamaguchi.jp", - "hofu.yamaguchi.jp", - "iwakuni.yamaguchi.jp", - "kudamatsu.yamaguchi.jp", - "mitou.yamaguchi.jp", - "nagato.yamaguchi.jp", - "oshima.yamaguchi.jp", - "shimonoseki.yamaguchi.jp", - "shunan.yamaguchi.jp", - "tabuse.yamaguchi.jp", - "tokuyama.yamaguchi.jp", - "toyota.yamaguchi.jp", - "ube.yamaguchi.jp", - "yuu.yamaguchi.jp", - "chuo.yamanashi.jp", - "doshi.yamanashi.jp", - "fuefuki.yamanashi.jp", - "fujikawa.yamanashi.jp", - "fujikawaguchiko.yamanashi.jp", - "fujiyoshida.yamanashi.jp", - "hayakawa.yamanashi.jp", - "hokuto.yamanashi.jp", - "ichikawamisato.yamanashi.jp", - "kai.yamanashi.jp", - "kofu.yamanashi.jp", - "koshu.yamanashi.jp", - "kosuge.yamanashi.jp", - "minami-alps.yamanashi.jp", - "minobu.yamanashi.jp", - "nakamichi.yamanashi.jp", - "nanbu.yamanashi.jp", - "narusawa.yamanashi.jp", - "nirasaki.yamanashi.jp", - "nishikatsura.yamanashi.jp", - "oshino.yamanashi.jp", - "otsuki.yamanashi.jp", - "showa.yamanashi.jp", - "tabayama.yamanashi.jp", - "tsuru.yamanashi.jp", - "uenohara.yamanashi.jp", - "yamanakako.yamanashi.jp", - "yamanashi.yamanashi.jp", - "*.ke", - "kg", - "org.kg", - "net.kg", - "com.kg", - "edu.kg", - "gov.kg", - "mil.kg", - "*.kh", - "ki", - "edu.ki", - "biz.ki", - "net.ki", - "org.ki", - "gov.ki", - "info.ki", - "com.ki", - "km", - "org.km", - "nom.km", - "gov.km", - "prd.km", - "tm.km", - "edu.km", - "mil.km", - "ass.km", - "com.km", - "coop.km", - "asso.km", - "presse.km", - "medecin.km", - "notaires.km", - "pharmaciens.km", - "veterinaire.km", - "gouv.km", - "kn", - "net.kn", - "org.kn", - "edu.kn", - "gov.kn", - "kp", - "com.kp", - "edu.kp", - "gov.kp", - "org.kp", - "rep.kp", - "tra.kp", - "kr", - "ac.kr", - "co.kr", - "es.kr", - "go.kr", - "hs.kr", - "kg.kr", - "mil.kr", - "ms.kr", - "ne.kr", - "or.kr", - "pe.kr", - "re.kr", - "sc.kr", - "busan.kr", - "chungbuk.kr", - "chungnam.kr", - "daegu.kr", - "daejeon.kr", - "gangwon.kr", - "gwangju.kr", - "gyeongbuk.kr", - "gyeonggi.kr", - "gyeongnam.kr", - "incheon.kr", - "jeju.kr", - "jeonbuk.kr", - "jeonnam.kr", - "seoul.kr", - "ulsan.kr", - "*.kw", - "ky", - "edu.ky", - "gov.ky", - "com.ky", - "org.ky", - "net.ky", - "kz", - "org.kz", - "edu.kz", - "net.kz", - "gov.kz", - "mil.kz", - "com.kz", - "la", - "int.la", - "net.la", - "info.la", - "edu.la", - "gov.la", - "per.la", - 
"com.la", - "org.la", - "lb", - "com.lb", - "edu.lb", - "gov.lb", - "net.lb", - "org.lb", - "lc", - "com.lc", - "net.lc", - "co.lc", - "org.lc", - "edu.lc", - "gov.lc", - "li", - "lk", - "gov.lk", - "sch.lk", - "net.lk", - "int.lk", - "com.lk", - "org.lk", - "edu.lk", - "ngo.lk", - "soc.lk", - "web.lk", - "ltd.lk", - "assn.lk", - "grp.lk", - "hotel.lk", - "ac.lk", - "lr", - "com.lr", - "edu.lr", - "gov.lr", - "org.lr", - "net.lr", - "ls", - "co.ls", - "org.ls", - "lt", - "gov.lt", - "lu", - "lv", - "com.lv", - "edu.lv", - "gov.lv", - "org.lv", - "mil.lv", - "id.lv", - "net.lv", - "asn.lv", - "conf.lv", - "ly", - "com.ly", - "net.ly", - "gov.ly", - "plc.ly", - "edu.ly", - "sch.ly", - "med.ly", - "org.ly", - "id.ly", - "ma", - "co.ma", - "net.ma", - "gov.ma", - "org.ma", - "ac.ma", - "press.ma", - "mc", - "tm.mc", - "asso.mc", - "md", - "me", - "co.me", - "net.me", - "org.me", - "edu.me", - "ac.me", - "gov.me", - "its.me", - "priv.me", - "mg", - "org.mg", - "nom.mg", - "gov.mg", - "prd.mg", - "tm.mg", - "edu.mg", - "mil.mg", - "com.mg", - "co.mg", - "mh", - "mil", - "mk", - "com.mk", - "org.mk", - "net.mk", - "edu.mk", - "gov.mk", - "inf.mk", - "name.mk", - "ml", - "com.ml", - "edu.ml", - "gouv.ml", - "gov.ml", - "net.ml", - "org.ml", - "presse.ml", - "*.mm", - "mn", - "gov.mn", - "edu.mn", - "org.mn", - "mo", - "com.mo", - "net.mo", - "org.mo", - "edu.mo", - "gov.mo", - "mobi", - "mp", - "mq", - "mr", - "gov.mr", - "ms", - "com.ms", - "edu.ms", - "gov.ms", - "net.ms", - "org.ms", - "mt", - "com.mt", - "edu.mt", - "net.mt", - "org.mt", - "mu", - "com.mu", - "net.mu", - "org.mu", - "gov.mu", - "ac.mu", - "co.mu", - "or.mu", - "museum", - "academy.museum", - "agriculture.museum", - "air.museum", - "airguard.museum", - "alabama.museum", - "alaska.museum", - "amber.museum", - "ambulance.museum", - "american.museum", - "americana.museum", - "americanantiques.museum", - "americanart.museum", - "amsterdam.museum", - "and.museum", - "annefrank.museum", - "anthro.museum", - "anthropology.museum", - "antiques.museum", - "aquarium.museum", - "arboretum.museum", - "archaeological.museum", - "archaeology.museum", - "architecture.museum", - "art.museum", - "artanddesign.museum", - "artcenter.museum", - "artdeco.museum", - "arteducation.museum", - "artgallery.museum", - "arts.museum", - "artsandcrafts.museum", - "asmatart.museum", - "assassination.museum", - "assisi.museum", - "association.museum", - "astronomy.museum", - "atlanta.museum", - "austin.museum", - "australia.museum", - "automotive.museum", - "aviation.museum", - "axis.museum", - "badajoz.museum", - "baghdad.museum", - "bahn.museum", - "bale.museum", - "baltimore.museum", - "barcelona.museum", - "baseball.museum", - "basel.museum", - "baths.museum", - "bauern.museum", - "beauxarts.museum", - "beeldengeluid.museum", - "bellevue.museum", - "bergbau.museum", - "berkeley.museum", - "berlin.museum", - "bern.museum", - "bible.museum", - "bilbao.museum", - "bill.museum", - "birdart.museum", - "birthplace.museum", - "bonn.museum", - "boston.museum", - "botanical.museum", - "botanicalgarden.museum", - "botanicgarden.museum", - "botany.museum", - "brandywinevalley.museum", - "brasil.museum", - "bristol.museum", - "british.museum", - "britishcolumbia.museum", - "broadcast.museum", - "brunel.museum", - "brussel.museum", - "brussels.museum", - "bruxelles.museum", - "building.museum", - "burghof.museum", - "bus.museum", - "bushey.museum", - "cadaques.museum", - "california.museum", - "cambridge.museum", - "can.museum", - "canada.museum", - 
"capebreton.museum", - "carrier.museum", - "cartoonart.museum", - "casadelamoneda.museum", - "castle.museum", - "castres.museum", - "celtic.museum", - "center.museum", - "chattanooga.museum", - "cheltenham.museum", - "chesapeakebay.museum", - "chicago.museum", - "children.museum", - "childrens.museum", - "childrensgarden.museum", - "chiropractic.museum", - "chocolate.museum", - "christiansburg.museum", - "cincinnati.museum", - "cinema.museum", - "circus.museum", - "civilisation.museum", - "civilization.museum", - "civilwar.museum", - "clinton.museum", - "clock.museum", - "coal.museum", - "coastaldefence.museum", - "cody.museum", - "coldwar.museum", - "collection.museum", - "colonialwilliamsburg.museum", - "coloradoplateau.museum", - "columbia.museum", - "columbus.museum", - "communication.museum", - "communications.museum", - "community.museum", - "computer.museum", - "computerhistory.museum", - "xn--comunicaes-v6a2o.museum", - "contemporary.museum", - "contemporaryart.museum", - "convent.museum", - "copenhagen.museum", - "corporation.museum", - "xn--correios-e-telecomunicaes-ghc29a.museum", - "corvette.museum", - "costume.museum", - "countryestate.museum", - "county.museum", - "crafts.museum", - "cranbrook.museum", - "creation.museum", - "cultural.museum", - "culturalcenter.museum", - "culture.museum", - "cyber.museum", - "cymru.museum", - "dali.museum", - "dallas.museum", - "database.museum", - "ddr.museum", - "decorativearts.museum", - "delaware.museum", - "delmenhorst.museum", - "denmark.museum", - "depot.museum", - "design.museum", - "detroit.museum", - "dinosaur.museum", - "discovery.museum", - "dolls.museum", - "donostia.museum", - "durham.museum", - "eastafrica.museum", - "eastcoast.museum", - "education.museum", - "educational.museum", - "egyptian.museum", - "eisenbahn.museum", - "elburg.museum", - "elvendrell.museum", - "embroidery.museum", - "encyclopedic.museum", - "england.museum", - "entomology.museum", - "environment.museum", - "environmentalconservation.museum", - "epilepsy.museum", - "essex.museum", - "estate.museum", - "ethnology.museum", - "exeter.museum", - "exhibition.museum", - "family.museum", - "farm.museum", - "farmequipment.museum", - "farmers.museum", - "farmstead.museum", - "field.museum", - "figueres.museum", - "filatelia.museum", - "film.museum", - "fineart.museum", - "finearts.museum", - "finland.museum", - "flanders.museum", - "florida.museum", - "force.museum", - "fortmissoula.museum", - "fortworth.museum", - "foundation.museum", - "francaise.museum", - "frankfurt.museum", - "franziskaner.museum", - "freemasonry.museum", - "freiburg.museum", - "fribourg.museum", - "frog.museum", - "fundacio.museum", - "furniture.museum", - "gallery.museum", - "garden.museum", - "gateway.museum", - "geelvinck.museum", - "gemological.museum", - "geology.museum", - "georgia.museum", - "giessen.museum", - "glas.museum", - "glass.museum", - "gorge.museum", - "grandrapids.museum", - "graz.museum", - "guernsey.museum", - "halloffame.museum", - "hamburg.museum", - "handson.museum", - "harvestcelebration.museum", - "hawaii.museum", - "health.museum", - "heimatunduhren.museum", - "hellas.museum", - "helsinki.museum", - "hembygdsforbund.museum", - "heritage.museum", - "histoire.museum", - "historical.museum", - "historicalsociety.museum", - "historichouses.museum", - "historisch.museum", - "historisches.museum", - "history.museum", - "historyofscience.museum", - "horology.museum", - "house.museum", - "humanities.museum", - "illustration.museum", - "imageandsound.museum", - 
"indian.museum", - "indiana.museum", - "indianapolis.museum", - "indianmarket.museum", - "intelligence.museum", - "interactive.museum", - "iraq.museum", - "iron.museum", - "isleofman.museum", - "jamison.museum", - "jefferson.museum", - "jerusalem.museum", - "jewelry.museum", - "jewish.museum", - "jewishart.museum", - "jfk.museum", - "journalism.museum", - "judaica.museum", - "judygarland.museum", - "juedisches.museum", - "juif.museum", - "karate.museum", - "karikatur.museum", - "kids.museum", - "koebenhavn.museum", - "koeln.museum", - "kunst.museum", - "kunstsammlung.museum", - "kunstunddesign.museum", - "labor.museum", - "labour.museum", - "lajolla.museum", - "lancashire.museum", - "landes.museum", - "lans.museum", - "xn--lns-qla.museum", - "larsson.museum", - "lewismiller.museum", - "lincoln.museum", - "linz.museum", - "living.museum", - "livinghistory.museum", - "localhistory.museum", - "london.museum", - "losangeles.museum", - "louvre.museum", - "loyalist.museum", - "lucerne.museum", - "luxembourg.museum", - "luzern.museum", - "mad.museum", - "madrid.museum", - "mallorca.museum", - "manchester.museum", - "mansion.museum", - "mansions.museum", - "manx.museum", - "marburg.museum", - "maritime.museum", - "maritimo.museum", - "maryland.museum", - "marylhurst.museum", - "media.museum", - "medical.museum", - "medizinhistorisches.museum", - "meeres.museum", - "memorial.museum", - "mesaverde.museum", - "michigan.museum", - "midatlantic.museum", - "military.museum", - "mill.museum", - "miners.museum", - "mining.museum", - "minnesota.museum", - "missile.museum", - "missoula.museum", - "modern.museum", - "moma.museum", - "money.museum", - "monmouth.museum", - "monticello.museum", - "montreal.museum", - "moscow.museum", - "motorcycle.museum", - "muenchen.museum", - "muenster.museum", - "mulhouse.museum", - "muncie.museum", - "museet.museum", - "museumcenter.museum", - "museumvereniging.museum", - "music.museum", - "national.museum", - "nationalfirearms.museum", - "nationalheritage.museum", - "nativeamerican.museum", - "naturalhistory.museum", - "naturalhistorymuseum.museum", - "naturalsciences.museum", - "nature.museum", - "naturhistorisches.museum", - "natuurwetenschappen.museum", - "naumburg.museum", - "naval.museum", - "nebraska.museum", - "neues.museum", - "newhampshire.museum", - "newjersey.museum", - "newmexico.museum", - "newport.museum", - "newspaper.museum", - "newyork.museum", - "niepce.museum", - "norfolk.museum", - "north.museum", - "nrw.museum", - "nuernberg.museum", - "nuremberg.museum", - "nyc.museum", - "nyny.museum", - "oceanographic.museum", - "oceanographique.museum", - "omaha.museum", - "online.museum", - "ontario.museum", - "openair.museum", - "oregon.museum", - "oregontrail.museum", - "otago.museum", - "oxford.museum", - "pacific.museum", - "paderborn.museum", - "palace.museum", - "paleo.museum", - "palmsprings.museum", - "panama.museum", - "paris.museum", - "pasadena.museum", - "pharmacy.museum", - "philadelphia.museum", - "philadelphiaarea.museum", - "philately.museum", - "phoenix.museum", - "photography.museum", - "pilots.museum", - "pittsburgh.museum", - "planetarium.museum", - "plantation.museum", - "plants.museum", - "plaza.museum", - "portal.museum", - "portland.museum", - "portlligat.museum", - "posts-and-telecommunications.museum", - "preservation.museum", - "presidio.museum", - "press.museum", - "project.museum", - "public.museum", - "pubol.museum", - "quebec.museum", - "railroad.museum", - "railway.museum", - "research.museum", - "resistance.museum", - 
"riodejaneiro.museum", - "rochester.museum", - "rockart.museum", - "roma.museum", - "russia.museum", - "saintlouis.museum", - "salem.museum", - "salvadordali.museum", - "salzburg.museum", - "sandiego.museum", - "sanfrancisco.museum", - "santabarbara.museum", - "santacruz.museum", - "santafe.museum", - "saskatchewan.museum", - "satx.museum", - "savannahga.museum", - "schlesisches.museum", - "schoenbrunn.museum", - "schokoladen.museum", - "school.museum", - "schweiz.museum", - "science.museum", - "scienceandhistory.museum", - "scienceandindustry.museum", - "sciencecenter.museum", - "sciencecenters.museum", - "science-fiction.museum", - "sciencehistory.museum", - "sciences.museum", - "sciencesnaturelles.museum", - "scotland.museum", - "seaport.museum", - "settlement.museum", - "settlers.museum", - "shell.museum", - "sherbrooke.museum", - "sibenik.museum", - "silk.museum", - "ski.museum", - "skole.museum", - "society.museum", - "sologne.museum", - "soundandvision.museum", - "southcarolina.museum", - "southwest.museum", - "space.museum", - "spy.museum", - "square.museum", - "stadt.museum", - "stalbans.museum", - "starnberg.museum", - "state.museum", - "stateofdelaware.museum", - "station.museum", - "steam.museum", - "steiermark.museum", - "stjohn.museum", - "stockholm.museum", - "stpetersburg.museum", - "stuttgart.museum", - "suisse.museum", - "surgeonshall.museum", - "surrey.museum", - "svizzera.museum", - "sweden.museum", - "sydney.museum", - "tank.museum", - "tcm.museum", - "technology.museum", - "telekommunikation.museum", - "television.museum", - "texas.museum", - "textile.museum", - "theater.museum", - "time.museum", - "timekeeping.museum", - "topology.museum", - "torino.museum", - "touch.museum", - "town.museum", - "transport.museum", - "tree.museum", - "trolley.museum", - "trust.museum", - "trustee.museum", - "uhren.museum", - "ulm.museum", - "undersea.museum", - "university.museum", - "usa.museum", - "usantiques.museum", - "usarts.museum", - "uscountryestate.museum", - "usculture.museum", - "usdecorativearts.museum", - "usgarden.museum", - "ushistory.museum", - "ushuaia.museum", - "uslivinghistory.museum", - "utah.museum", - "uvic.museum", - "valley.museum", - "vantaa.museum", - "versailles.museum", - "viking.museum", - "village.museum", - "virginia.museum", - "virtual.museum", - "virtuel.museum", - "vlaanderen.museum", - "volkenkunde.museum", - "wales.museum", - "wallonie.museum", - "war.museum", - "washingtondc.museum", - "watchandclock.museum", - "watch-and-clock.museum", - "western.museum", - "westfalen.museum", - "whaling.museum", - "wildlife.museum", - "williamsburg.museum", - "windmill.museum", - "workshop.museum", - "york.museum", - "yorkshire.museum", - "yosemite.museum", - "youth.museum", - "zoological.museum", - "zoology.museum", - "xn--9dbhblg6di.museum", - "xn--h1aegh.museum", - "mv", - "aero.mv", - "biz.mv", - "com.mv", - "coop.mv", - "edu.mv", - "gov.mv", - "info.mv", - "int.mv", - "mil.mv", - "museum.mv", - "name.mv", - "net.mv", - "org.mv", - "pro.mv", - "mw", - "ac.mw", - "biz.mw", - "co.mw", - "com.mw", - "coop.mw", - "edu.mw", - "gov.mw", - "int.mw", - "museum.mw", - "net.mw", - "org.mw", - "mx", - "com.mx", - "org.mx", - "gob.mx", - "edu.mx", - "net.mx", - "my", - "com.my", - "net.my", - "org.my", - "gov.my", - "edu.my", - "mil.my", - "name.my", - "mz", - "ac.mz", - "adv.mz", - "co.mz", - "edu.mz", - "gov.mz", - "mil.mz", - "net.mz", - "org.mz", - "na", - "info.na", - "pro.na", - "name.na", - "school.na", - "or.na", - "dr.na", - "us.na", - "mx.na", - "ca.na", - 
"in.na", - "cc.na", - "tv.na", - "ws.na", - "mobi.na", - "co.na", - "com.na", - "org.na", - "name", - "nc", - "asso.nc", - "nom.nc", - "ne", - "net", - "nf", - "com.nf", - "net.nf", - "per.nf", - "rec.nf", - "web.nf", - "arts.nf", - "firm.nf", - "info.nf", - "other.nf", - "store.nf", - "ng", - "com.ng", - "edu.ng", - "gov.ng", - "i.ng", - "mil.ng", - "mobi.ng", - "name.ng", - "net.ng", - "org.ng", - "sch.ng", - "ni", - "ac.ni", - "biz.ni", - "co.ni", - "com.ni", - "edu.ni", - "gob.ni", - "in.ni", - "info.ni", - "int.ni", - "mil.ni", - "net.ni", - "nom.ni", - "org.ni", - "web.ni", - "nl", - "bv.nl", - "no", - "fhs.no", - "vgs.no", - "fylkesbibl.no", - "folkebibl.no", - "museum.no", - "idrett.no", - "priv.no", - "mil.no", - "stat.no", - "dep.no", - "kommune.no", - "herad.no", - "aa.no", - "ah.no", - "bu.no", - "fm.no", - "hl.no", - "hm.no", - "jan-mayen.no", - "mr.no", - "nl.no", - "nt.no", - "of.no", - "ol.no", - "oslo.no", - "rl.no", - "sf.no", - "st.no", - "svalbard.no", - "tm.no", - "tr.no", - "va.no", - "vf.no", - "gs.aa.no", - "gs.ah.no", - "gs.bu.no", - "gs.fm.no", - "gs.hl.no", - "gs.hm.no", - "gs.jan-mayen.no", - "gs.mr.no", - "gs.nl.no", - "gs.nt.no", - "gs.of.no", - "gs.ol.no", - "gs.oslo.no", - "gs.rl.no", - "gs.sf.no", - "gs.st.no", - "gs.svalbard.no", - "gs.tm.no", - "gs.tr.no", - "gs.va.no", - "gs.vf.no", - "akrehamn.no", - "xn--krehamn-dxa.no", - "algard.no", - "xn--lgrd-poac.no", - "arna.no", - "brumunddal.no", - "bryne.no", - "bronnoysund.no", - "xn--brnnysund-m8ac.no", - "drobak.no", - "xn--drbak-wua.no", - "egersund.no", - "fetsund.no", - "floro.no", - "xn--flor-jra.no", - "fredrikstad.no", - "hokksund.no", - "honefoss.no", - "xn--hnefoss-q1a.no", - "jessheim.no", - "jorpeland.no", - "xn--jrpeland-54a.no", - "kirkenes.no", - "kopervik.no", - "krokstadelva.no", - "langevag.no", - "xn--langevg-jxa.no", - "leirvik.no", - "mjondalen.no", - "xn--mjndalen-64a.no", - "mo-i-rana.no", - "mosjoen.no", - "xn--mosjen-eya.no", - "nesoddtangen.no", - "orkanger.no", - "osoyro.no", - "xn--osyro-wua.no", - "raholt.no", - "xn--rholt-mra.no", - "sandnessjoen.no", - "xn--sandnessjen-ogb.no", - "skedsmokorset.no", - "slattum.no", - "spjelkavik.no", - "stathelle.no", - "stavern.no", - "stjordalshalsen.no", - "xn--stjrdalshalsen-sqb.no", - "tananger.no", - "tranby.no", - "vossevangen.no", - "afjord.no", - "xn--fjord-lra.no", - "agdenes.no", - "al.no", - "xn--l-1fa.no", - "alesund.no", - "xn--lesund-hua.no", - "alstahaug.no", - "alta.no", - "xn--lt-liac.no", - "alaheadju.no", - "xn--laheadju-7ya.no", - "alvdal.no", - "amli.no", - "xn--mli-tla.no", - "amot.no", - "xn--mot-tla.no", - "andebu.no", - "andoy.no", - "xn--andy-ira.no", - "andasuolo.no", - "ardal.no", - "xn--rdal-poa.no", - "aremark.no", - "arendal.no", - "xn--s-1fa.no", - "aseral.no", - "xn--seral-lra.no", - "asker.no", - "askim.no", - "askvoll.no", - "askoy.no", - "xn--asky-ira.no", - "asnes.no", - "xn--snes-poa.no", - "audnedaln.no", - "aukra.no", - "aure.no", - "aurland.no", - "aurskog-holand.no", - "xn--aurskog-hland-jnb.no", - "austevoll.no", - "austrheim.no", - "averoy.no", - "xn--avery-yua.no", - "balestrand.no", - "ballangen.no", - "balat.no", - "xn--blt-elab.no", - "balsfjord.no", - "bahccavuotna.no", - "xn--bhccavuotna-k7a.no", - "bamble.no", - "bardu.no", - "beardu.no", - "beiarn.no", - "bajddar.no", - "xn--bjddar-pta.no", - "baidar.no", - "xn--bidr-5nac.no", - "berg.no", - "bergen.no", - "berlevag.no", - "xn--berlevg-jxa.no", - "bearalvahki.no", - "xn--bearalvhki-y4a.no", - "bindal.no", - "birkenes.no", - "bjarkoy.no", - 
"xn--bjarky-fya.no", - "bjerkreim.no", - "bjugn.no", - "bodo.no", - "xn--bod-2na.no", - "badaddja.no", - "xn--bdddj-mrabd.no", - "budejju.no", - "bokn.no", - "bremanger.no", - "bronnoy.no", - "xn--brnny-wuac.no", - "bygland.no", - "bykle.no", - "barum.no", - "xn--brum-voa.no", - "bo.telemark.no", - "xn--b-5ga.telemark.no", - "bo.nordland.no", - "xn--b-5ga.nordland.no", - "bievat.no", - "xn--bievt-0qa.no", - "bomlo.no", - "xn--bmlo-gra.no", - "batsfjord.no", - "xn--btsfjord-9za.no", - "bahcavuotna.no", - "xn--bhcavuotna-s4a.no", - "dovre.no", - "drammen.no", - "drangedal.no", - "dyroy.no", - "xn--dyry-ira.no", - "donna.no", - "xn--dnna-gra.no", - "eid.no", - "eidfjord.no", - "eidsberg.no", - "eidskog.no", - "eidsvoll.no", - "eigersund.no", - "elverum.no", - "enebakk.no", - "engerdal.no", - "etne.no", - "etnedal.no", - "evenes.no", - "evenassi.no", - "xn--eveni-0qa01ga.no", - "evje-og-hornnes.no", - "farsund.no", - "fauske.no", - "fuossko.no", - "fuoisku.no", - "fedje.no", - "fet.no", - "finnoy.no", - "xn--finny-yua.no", - "fitjar.no", - "fjaler.no", - "fjell.no", - "flakstad.no", - "flatanger.no", - "flekkefjord.no", - "flesberg.no", - "flora.no", - "fla.no", - "xn--fl-zia.no", - "folldal.no", - "forsand.no", - "fosnes.no", - "frei.no", - "frogn.no", - "froland.no", - "frosta.no", - "frana.no", - "xn--frna-woa.no", - "froya.no", - "xn--frya-hra.no", - "fusa.no", - "fyresdal.no", - "forde.no", - "xn--frde-gra.no", - "gamvik.no", - "gangaviika.no", - "xn--ggaviika-8ya47h.no", - "gaular.no", - "gausdal.no", - "gildeskal.no", - "xn--gildeskl-g0a.no", - "giske.no", - "gjemnes.no", - "gjerdrum.no", - "gjerstad.no", - "gjesdal.no", - "gjovik.no", - "xn--gjvik-wua.no", - "gloppen.no", - "gol.no", - "gran.no", - "grane.no", - "granvin.no", - "gratangen.no", - "grimstad.no", - "grong.no", - "kraanghke.no", - "xn--kranghke-b0a.no", - "grue.no", - "gulen.no", - "hadsel.no", - "halden.no", - "halsa.no", - "hamar.no", - "hamaroy.no", - "habmer.no", - "xn--hbmer-xqa.no", - "hapmir.no", - "xn--hpmir-xqa.no", - "hammerfest.no", - "hammarfeasta.no", - "xn--hmmrfeasta-s4ac.no", - "haram.no", - "hareid.no", - "harstad.no", - "hasvik.no", - "aknoluokta.no", - "xn--koluokta-7ya57h.no", - "hattfjelldal.no", - "aarborte.no", - "haugesund.no", - "hemne.no", - "hemnes.no", - "hemsedal.no", - "heroy.more-og-romsdal.no", - "xn--hery-ira.xn--mre-og-romsdal-qqb.no", - "heroy.nordland.no", - "xn--hery-ira.nordland.no", - "hitra.no", - "hjartdal.no", - "hjelmeland.no", - "hobol.no", - "xn--hobl-ira.no", - "hof.no", - "hol.no", - "hole.no", - "holmestrand.no", - "holtalen.no", - "xn--holtlen-hxa.no", - "hornindal.no", - "horten.no", - "hurdal.no", - "hurum.no", - "hvaler.no", - "hyllestad.no", - "hagebostad.no", - "xn--hgebostad-g3a.no", - "hoyanger.no", - "xn--hyanger-q1a.no", - "hoylandet.no", - "xn--hylandet-54a.no", - "ha.no", - "xn--h-2fa.no", - "ibestad.no", - "inderoy.no", - "xn--indery-fya.no", - "iveland.no", - "jevnaker.no", - "jondal.no", - "jolster.no", - "xn--jlster-bya.no", - "karasjok.no", - "karasjohka.no", - "xn--krjohka-hwab49j.no", - "karlsoy.no", - "galsa.no", - "xn--gls-elac.no", - "karmoy.no", - "xn--karmy-yua.no", - "kautokeino.no", - "guovdageaidnu.no", - "klepp.no", - "klabu.no", - "xn--klbu-woa.no", - "kongsberg.no", - "kongsvinger.no", - "kragero.no", - "xn--krager-gya.no", - "kristiansand.no", - "kristiansund.no", - "krodsherad.no", - "xn--krdsherad-m8a.no", - "kvalsund.no", - "rahkkeravju.no", - "xn--rhkkervju-01af.no", - "kvam.no", - "kvinesdal.no", - "kvinnherad.no", - "kviteseid.no", - 
"kvitsoy.no", - "xn--kvitsy-fya.no", - "kvafjord.no", - "xn--kvfjord-nxa.no", - "giehtavuoatna.no", - "kvanangen.no", - "xn--kvnangen-k0a.no", - "navuotna.no", - "xn--nvuotna-hwa.no", - "kafjord.no", - "xn--kfjord-iua.no", - "gaivuotna.no", - "xn--givuotna-8ya.no", - "larvik.no", - "lavangen.no", - "lavagis.no", - "loabat.no", - "xn--loabt-0qa.no", - "lebesby.no", - "davvesiida.no", - "leikanger.no", - "leirfjord.no", - "leka.no", - "leksvik.no", - "lenvik.no", - "leangaviika.no", - "xn--leagaviika-52b.no", - "lesja.no", - "levanger.no", - "lier.no", - "lierne.no", - "lillehammer.no", - "lillesand.no", - "lindesnes.no", - "lindas.no", - "xn--linds-pra.no", - "lom.no", - "loppa.no", - "lahppi.no", - "xn--lhppi-xqa.no", - "lund.no", - "lunner.no", - "luroy.no", - "xn--lury-ira.no", - "luster.no", - "lyngdal.no", - "lyngen.no", - "ivgu.no", - "lardal.no", - "lerdal.no", - "xn--lrdal-sra.no", - "lodingen.no", - "xn--ldingen-q1a.no", - "lorenskog.no", - "xn--lrenskog-54a.no", - "loten.no", - "xn--lten-gra.no", - "malvik.no", - "masoy.no", - "xn--msy-ula0h.no", - "muosat.no", - "xn--muost-0qa.no", - "mandal.no", - "marker.no", - "marnardal.no", - "masfjorden.no", - "meland.no", - "meldal.no", - "melhus.no", - "meloy.no", - "xn--mely-ira.no", - "meraker.no", - "xn--merker-kua.no", - "moareke.no", - "xn--moreke-jua.no", - "midsund.no", - "midtre-gauldal.no", - "modalen.no", - "modum.no", - "molde.no", - "moskenes.no", - "moss.no", - "mosvik.no", - "malselv.no", - "xn--mlselv-iua.no", - "malatvuopmi.no", - "xn--mlatvuopmi-s4a.no", - "namdalseid.no", - "aejrie.no", - "namsos.no", - "namsskogan.no", - "naamesjevuemie.no", - "xn--nmesjevuemie-tcba.no", - "laakesvuemie.no", - "nannestad.no", - "narvik.no", - "narviika.no", - "naustdal.no", - "nedre-eiker.no", - "nes.akershus.no", - "nes.buskerud.no", - "nesna.no", - "nesodden.no", - "nesseby.no", - "unjarga.no", - "xn--unjrga-rta.no", - "nesset.no", - "nissedal.no", - "nittedal.no", - "nord-aurdal.no", - "nord-fron.no", - "nord-odal.no", - "norddal.no", - "nordkapp.no", - "davvenjarga.no", - "xn--davvenjrga-y4a.no", - "nordre-land.no", - "nordreisa.no", - "raisa.no", - "xn--risa-5na.no", - "nore-og-uvdal.no", - "notodden.no", - "naroy.no", - "xn--nry-yla5g.no", - "notteroy.no", - "xn--nttery-byae.no", - "odda.no", - "oksnes.no", - "xn--ksnes-uua.no", - "oppdal.no", - "oppegard.no", - "xn--oppegrd-ixa.no", - "orkdal.no", - "orland.no", - "xn--rland-uua.no", - "orskog.no", - "xn--rskog-uua.no", - "orsta.no", - "xn--rsta-fra.no", - "os.hedmark.no", - "os.hordaland.no", - "osen.no", - "osteroy.no", - "xn--ostery-fya.no", - "ostre-toten.no", - "xn--stre-toten-zcb.no", - "overhalla.no", - "ovre-eiker.no", - "xn--vre-eiker-k8a.no", - "oyer.no", - "xn--yer-zna.no", - "oygarden.no", - "xn--ygarden-p1a.no", - "oystre-slidre.no", - "xn--ystre-slidre-ujb.no", - "porsanger.no", - "porsangu.no", - "xn--porsgu-sta26f.no", - "porsgrunn.no", - "radoy.no", - "xn--rady-ira.no", - "rakkestad.no", - "rana.no", - "ruovat.no", - "randaberg.no", - "rauma.no", - "rendalen.no", - "rennebu.no", - "rennesoy.no", - "xn--rennesy-v1a.no", - "rindal.no", - "ringebu.no", - "ringerike.no", - "ringsaker.no", - "rissa.no", - "risor.no", - "xn--risr-ira.no", - "roan.no", - "rollag.no", - "rygge.no", - "ralingen.no", - "xn--rlingen-mxa.no", - "rodoy.no", - "xn--rdy-0nab.no", - "romskog.no", - "xn--rmskog-bya.no", - "roros.no", - "xn--rros-gra.no", - "rost.no", - "xn--rst-0na.no", - "royken.no", - "xn--ryken-vua.no", - "royrvik.no", - "xn--ryrvik-bya.no", - "rade.no", - "xn--rde-ula.no", - 
"salangen.no", - "siellak.no", - "saltdal.no", - "salat.no", - "xn--slt-elab.no", - "xn--slat-5na.no", - "samnanger.no", - "sande.more-og-romsdal.no", - "sande.xn--mre-og-romsdal-qqb.no", - "sande.vestfold.no", - "sandefjord.no", - "sandnes.no", - "sandoy.no", - "xn--sandy-yua.no", - "sarpsborg.no", - "sauda.no", - "sauherad.no", - "sel.no", - "selbu.no", - "selje.no", - "seljord.no", - "sigdal.no", - "siljan.no", - "sirdal.no", - "skaun.no", - "skedsmo.no", - "ski.no", - "skien.no", - "skiptvet.no", - "skjervoy.no", - "xn--skjervy-v1a.no", - "skierva.no", - "xn--skierv-uta.no", - "skjak.no", - "xn--skjk-soa.no", - "skodje.no", - "skanland.no", - "xn--sknland-fxa.no", - "skanit.no", - "xn--sknit-yqa.no", - "smola.no", - "xn--smla-hra.no", - "snillfjord.no", - "snasa.no", - "xn--snsa-roa.no", - "snoasa.no", - "snaase.no", - "xn--snase-nra.no", - "sogndal.no", - "sokndal.no", - "sola.no", - "solund.no", - "songdalen.no", - "sortland.no", - "spydeberg.no", - "stange.no", - "stavanger.no", - "steigen.no", - "steinkjer.no", - "stjordal.no", - "xn--stjrdal-s1a.no", - "stokke.no", - "stor-elvdal.no", - "stord.no", - "stordal.no", - "storfjord.no", - "omasvuotna.no", - "strand.no", - "stranda.no", - "stryn.no", - "sula.no", - "suldal.no", - "sund.no", - "sunndal.no", - "surnadal.no", - "sveio.no", - "svelvik.no", - "sykkylven.no", - "sogne.no", - "xn--sgne-gra.no", - "somna.no", - "xn--smna-gra.no", - "sondre-land.no", - "xn--sndre-land-0cb.no", - "sor-aurdal.no", - "xn--sr-aurdal-l8a.no", - "sor-fron.no", - "xn--sr-fron-q1a.no", - "sor-odal.no", - "xn--sr-odal-q1a.no", - "sor-varanger.no", - "xn--sr-varanger-ggb.no", - "matta-varjjat.no", - "xn--mtta-vrjjat-k7af.no", - "sorfold.no", - "xn--srfold-bya.no", - "sorreisa.no", - "xn--srreisa-q1a.no", - "sorum.no", - "xn--srum-gra.no", - "tana.no", - "deatnu.no", - "time.no", - "tingvoll.no", - "tinn.no", - "tjeldsund.no", - "dielddanuorri.no", - "tjome.no", - "xn--tjme-hra.no", - "tokke.no", - "tolga.no", - "torsken.no", - "tranoy.no", - "xn--trany-yua.no", - "tromso.no", - "xn--troms-zua.no", - "tromsa.no", - "romsa.no", - "trondheim.no", - "troandin.no", - "trysil.no", - "trana.no", - "xn--trna-woa.no", - "trogstad.no", - "xn--trgstad-r1a.no", - "tvedestrand.no", - "tydal.no", - "tynset.no", - "tysfjord.no", - "divtasvuodna.no", - "divttasvuotna.no", - "tysnes.no", - "tysvar.no", - "xn--tysvr-vra.no", - "tonsberg.no", - "xn--tnsberg-q1a.no", - "ullensaker.no", - "ullensvang.no", - "ulvik.no", - "utsira.no", - "vadso.no", - "xn--vads-jra.no", - "cahcesuolo.no", - "xn--hcesuolo-7ya35b.no", - "vaksdal.no", - "valle.no", - "vang.no", - "vanylven.no", - "vardo.no", - "xn--vard-jra.no", - "varggat.no", - "xn--vrggt-xqad.no", - "vefsn.no", - "vaapste.no", - "vega.no", - "vegarshei.no", - "xn--vegrshei-c0a.no", - "vennesla.no", - "verdal.no", - "verran.no", - "vestby.no", - "vestnes.no", - "vestre-slidre.no", - "vestre-toten.no", - "vestvagoy.no", - "xn--vestvgy-ixa6o.no", - "vevelstad.no", - "vik.no", - "vikna.no", - "vindafjord.no", - "volda.no", - "voss.no", - "varoy.no", - "xn--vry-yla5g.no", - "vagan.no", - "xn--vgan-qoa.no", - "voagat.no", - "vagsoy.no", - "xn--vgsy-qoa0j.no", - "vaga.no", - "xn--vg-yiab.no", - "valer.ostfold.no", - "xn--vler-qoa.xn--stfold-9xa.no", - "valer.hedmark.no", - "xn--vler-qoa.hedmark.no", - "*.np", - "nr", - "biz.nr", - "info.nr", - "gov.nr", - "edu.nr", - "org.nr", - "net.nr", - "com.nr", - "nu", - "nz", - "ac.nz", - "co.nz", - "cri.nz", - "geek.nz", - "gen.nz", - "govt.nz", - "health.nz", - "iwi.nz", - "kiwi.nz", - 
"maori.nz", - "mil.nz", - "xn--mori-qsa.nz", - "net.nz", - "org.nz", - "parliament.nz", - "school.nz", - "om", - "co.om", - "com.om", - "edu.om", - "gov.om", - "med.om", - "museum.om", - "net.om", - "org.om", - "pro.om", - "onion", - "org", - "pa", - "ac.pa", - "gob.pa", - "com.pa", - "org.pa", - "sld.pa", - "edu.pa", - "net.pa", - "ing.pa", - "abo.pa", - "med.pa", - "nom.pa", - "pe", - "edu.pe", - "gob.pe", - "nom.pe", - "mil.pe", - "org.pe", - "com.pe", - "net.pe", - "pf", - "com.pf", - "org.pf", - "edu.pf", - "*.pg", - "ph", - "com.ph", - "net.ph", - "org.ph", - "gov.ph", - "edu.ph", - "ngo.ph", - "mil.ph", - "i.ph", - "pk", - "com.pk", - "net.pk", - "edu.pk", - "org.pk", - "fam.pk", - "biz.pk", - "web.pk", - "gov.pk", - "gob.pk", - "gok.pk", - "gon.pk", - "gop.pk", - "gos.pk", - "info.pk", - "pl", - "com.pl", - "net.pl", - "org.pl", - "aid.pl", - "agro.pl", - "atm.pl", - "auto.pl", - "biz.pl", - "edu.pl", - "gmina.pl", - "gsm.pl", - "info.pl", - "mail.pl", - "miasta.pl", - "media.pl", - "mil.pl", - "nieruchomosci.pl", - "nom.pl", - "pc.pl", - "powiat.pl", - "priv.pl", - "realestate.pl", - "rel.pl", - "sex.pl", - "shop.pl", - "sklep.pl", - "sos.pl", - "szkola.pl", - "targi.pl", - "tm.pl", - "tourism.pl", - "travel.pl", - "turystyka.pl", - "gov.pl", - "ap.gov.pl", - "ic.gov.pl", - "is.gov.pl", - "us.gov.pl", - "kmpsp.gov.pl", - "kppsp.gov.pl", - "kwpsp.gov.pl", - "psp.gov.pl", - "wskr.gov.pl", - "kwp.gov.pl", - "mw.gov.pl", - "ug.gov.pl", - "um.gov.pl", - "umig.gov.pl", - "ugim.gov.pl", - "upow.gov.pl", - "uw.gov.pl", - "starostwo.gov.pl", - "pa.gov.pl", - "po.gov.pl", - "psse.gov.pl", - "pup.gov.pl", - "rzgw.gov.pl", - "sa.gov.pl", - "so.gov.pl", - "sr.gov.pl", - "wsa.gov.pl", - "sko.gov.pl", - "uzs.gov.pl", - "wiih.gov.pl", - "winb.gov.pl", - "pinb.gov.pl", - "wios.gov.pl", - "witd.gov.pl", - "wzmiuw.gov.pl", - "piw.gov.pl", - "wiw.gov.pl", - "griw.gov.pl", - "wif.gov.pl", - "oum.gov.pl", - "sdn.gov.pl", - "zp.gov.pl", - "uppo.gov.pl", - "mup.gov.pl", - "wuoz.gov.pl", - "konsulat.gov.pl", - "oirm.gov.pl", - "augustow.pl", - "babia-gora.pl", - "bedzin.pl", - "beskidy.pl", - "bialowieza.pl", - "bialystok.pl", - "bielawa.pl", - "bieszczady.pl", - "boleslawiec.pl", - "bydgoszcz.pl", - "bytom.pl", - "cieszyn.pl", - "czeladz.pl", - "czest.pl", - "dlugoleka.pl", - "elblag.pl", - "elk.pl", - "glogow.pl", - "gniezno.pl", - "gorlice.pl", - "grajewo.pl", - "ilawa.pl", - "jaworzno.pl", - "jelenia-gora.pl", - "jgora.pl", - "kalisz.pl", - "kazimierz-dolny.pl", - "karpacz.pl", - "kartuzy.pl", - "kaszuby.pl", - "katowice.pl", - "kepno.pl", - "ketrzyn.pl", - "klodzko.pl", - "kobierzyce.pl", - "kolobrzeg.pl", - "konin.pl", - "konskowola.pl", - "kutno.pl", - "lapy.pl", - "lebork.pl", - "legnica.pl", - "lezajsk.pl", - "limanowa.pl", - "lomza.pl", - "lowicz.pl", - "lubin.pl", - "lukow.pl", - "malbork.pl", - "malopolska.pl", - "mazowsze.pl", - "mazury.pl", - "mielec.pl", - "mielno.pl", - "mragowo.pl", - "naklo.pl", - "nowaruda.pl", - "nysa.pl", - "olawa.pl", - "olecko.pl", - "olkusz.pl", - "olsztyn.pl", - "opoczno.pl", - "opole.pl", - "ostroda.pl", - "ostroleka.pl", - "ostrowiec.pl", - "ostrowwlkp.pl", - "pila.pl", - "pisz.pl", - "podhale.pl", - "podlasie.pl", - "polkowice.pl", - "pomorze.pl", - "pomorskie.pl", - "prochowice.pl", - "pruszkow.pl", - "przeworsk.pl", - "pulawy.pl", - "radom.pl", - "rawa-maz.pl", - "rybnik.pl", - "rzeszow.pl", - "sanok.pl", - "sejny.pl", - "slask.pl", - "slupsk.pl", - "sosnowiec.pl", - "stalowa-wola.pl", - "skoczow.pl", - "starachowice.pl", - "stargard.pl", - "suwalki.pl", - 
"swidnica.pl", - "swiebodzin.pl", - "swinoujscie.pl", - "szczecin.pl", - "szczytno.pl", - "tarnobrzeg.pl", - "tgory.pl", - "turek.pl", - "tychy.pl", - "ustka.pl", - "walbrzych.pl", - "warmia.pl", - "warszawa.pl", - "waw.pl", - "wegrow.pl", - "wielun.pl", - "wlocl.pl", - "wloclawek.pl", - "wodzislaw.pl", - "wolomin.pl", - "wroclaw.pl", - "zachpomor.pl", - "zagan.pl", - "zarow.pl", - "zgora.pl", - "zgorzelec.pl", - "pm", - "pn", - "gov.pn", - "co.pn", - "org.pn", - "edu.pn", - "net.pn", - "post", - "pr", - "com.pr", - "net.pr", - "org.pr", - "gov.pr", - "edu.pr", - "isla.pr", - "pro.pr", - "biz.pr", - "info.pr", - "name.pr", - "est.pr", - "prof.pr", - "ac.pr", - "pro", - "aaa.pro", - "aca.pro", - "acct.pro", - "avocat.pro", - "bar.pro", - "cpa.pro", - "eng.pro", - "jur.pro", - "law.pro", - "med.pro", - "recht.pro", - "ps", - "edu.ps", - "gov.ps", - "sec.ps", - "plo.ps", - "com.ps", - "org.ps", - "net.ps", - "pt", - "net.pt", - "gov.pt", - "org.pt", - "edu.pt", - "int.pt", - "publ.pt", - "com.pt", - "nome.pt", - "pw", - "co.pw", - "ne.pw", - "or.pw", - "ed.pw", - "go.pw", - "belau.pw", - "py", - "com.py", - "coop.py", - "edu.py", - "gov.py", - "mil.py", - "net.py", - "org.py", - "qa", - "com.qa", - "edu.qa", - "gov.qa", - "mil.qa", - "name.qa", - "net.qa", - "org.qa", - "sch.qa", - "re", - "asso.re", - "com.re", - "nom.re", - "ro", - "arts.ro", - "com.ro", - "firm.ro", - "info.ro", - "nom.ro", - "nt.ro", - "org.ro", - "rec.ro", - "store.ro", - "tm.ro", - "www.ro", - "rs", - "ac.rs", - "co.rs", - "edu.rs", - "gov.rs", - "in.rs", - "org.rs", - "ru", - "ac.ru", - "edu.ru", - "gov.ru", - "int.ru", - "mil.ru", - "test.ru", - "rw", - "gov.rw", - "net.rw", - "edu.rw", - "ac.rw", - "com.rw", - "co.rw", - "int.rw", - "mil.rw", - "gouv.rw", - "sa", - "com.sa", - "net.sa", - "org.sa", - "gov.sa", - "med.sa", - "pub.sa", - "edu.sa", - "sch.sa", - "sb", - "com.sb", - "edu.sb", - "gov.sb", - "net.sb", - "org.sb", - "sc", - "com.sc", - "gov.sc", - "net.sc", - "org.sc", - "edu.sc", - "sd", - "com.sd", - "net.sd", - "org.sd", - "edu.sd", - "med.sd", - "tv.sd", - "gov.sd", - "info.sd", - "se", - "a.se", - "ac.se", - "b.se", - "bd.se", - "brand.se", - "c.se", - "d.se", - "e.se", - "f.se", - "fh.se", - "fhsk.se", - "fhv.se", - "g.se", - "h.se", - "i.se", - "k.se", - "komforb.se", - "kommunalforbund.se", - "komvux.se", - "l.se", - "lanbib.se", - "m.se", - "n.se", - "naturbruksgymn.se", - "o.se", - "org.se", - "p.se", - "parti.se", - "pp.se", - "press.se", - "r.se", - "s.se", - "t.se", - "tm.se", - "u.se", - "w.se", - "x.se", - "y.se", - "z.se", - "sg", - "com.sg", - "net.sg", - "org.sg", - "gov.sg", - "edu.sg", - "per.sg", - "sh", - "com.sh", - "net.sh", - "gov.sh", - "org.sh", - "mil.sh", - "si", - "sj", - "sk", - "sl", - "com.sl", - "net.sl", - "edu.sl", - "gov.sl", - "org.sl", - "sm", - "sn", - "art.sn", - "com.sn", - "edu.sn", - "gouv.sn", - "org.sn", - "perso.sn", - "univ.sn", - "so", - "com.so", - "net.so", - "org.so", - "sr", - "st", - "co.st", - "com.st", - "consulado.st", - "edu.st", - "embaixada.st", - "gov.st", - "mil.st", - "net.st", - "org.st", - "principe.st", - "saotome.st", - "store.st", - "su", - "sv", - "com.sv", - "edu.sv", - "gob.sv", - "org.sv", - "red.sv", - "sx", - "gov.sx", - "sy", - "edu.sy", - "gov.sy", - "net.sy", - "mil.sy", - "com.sy", - "org.sy", - "sz", - "co.sz", - "ac.sz", - "org.sz", - "tc", - "td", - "tel", - "tf", - "tg", - "th", - "ac.th", - "co.th", - "go.th", - "in.th", - "mi.th", - "net.th", - "or.th", - "tj", - "ac.tj", - "biz.tj", - "co.tj", - "com.tj", - "edu.tj", - 
"go.tj", - "gov.tj", - "int.tj", - "mil.tj", - "name.tj", - "net.tj", - "nic.tj", - "org.tj", - "test.tj", - "web.tj", - "tk", - "tl", - "gov.tl", - "tm", - "com.tm", - "co.tm", - "org.tm", - "net.tm", - "nom.tm", - "gov.tm", - "mil.tm", - "edu.tm", - "tn", - "com.tn", - "ens.tn", - "fin.tn", - "gov.tn", - "ind.tn", - "intl.tn", - "nat.tn", - "net.tn", - "org.tn", - "info.tn", - "perso.tn", - "tourism.tn", - "edunet.tn", - "rnrt.tn", - "rns.tn", - "rnu.tn", - "mincom.tn", - "agrinet.tn", - "defense.tn", - "turen.tn", - "to", - "com.to", - "gov.to", - "net.to", - "org.to", - "edu.to", - "mil.to", - "tr", - "com.tr", - "info.tr", - "biz.tr", - "net.tr", - "org.tr", - "web.tr", - "gen.tr", - "tv.tr", - "av.tr", - "dr.tr", - "bbs.tr", - "name.tr", - "tel.tr", - "gov.tr", - "bel.tr", - "pol.tr", - "mil.tr", - "k12.tr", - "edu.tr", - "kep.tr", - "nc.tr", - "gov.nc.tr", - "travel", - "tt", - "co.tt", - "com.tt", - "org.tt", - "net.tt", - "biz.tt", - "info.tt", - "pro.tt", - "int.tt", - "coop.tt", - "jobs.tt", - "mobi.tt", - "travel.tt", - "museum.tt", - "aero.tt", - "name.tt", - "gov.tt", - "edu.tt", - "tv", - "tw", - "edu.tw", - "gov.tw", - "mil.tw", - "com.tw", - "net.tw", - "org.tw", - "idv.tw", - "game.tw", - "ebiz.tw", - "club.tw", - "xn--zf0ao64a.tw", - "xn--uc0atv.tw", - "xn--czrw28b.tw", - "tz", - "ac.tz", - "co.tz", - "go.tz", - "hotel.tz", - "info.tz", - "me.tz", - "mil.tz", - "mobi.tz", - "ne.tz", - "or.tz", - "sc.tz", - "tv.tz", - "ua", - "com.ua", - "edu.ua", - "gov.ua", - "in.ua", - "net.ua", - "org.ua", - "cherkassy.ua", - "cherkasy.ua", - "chernigov.ua", - "chernihiv.ua", - "chernivtsi.ua", - "chernovtsy.ua", - "ck.ua", - "cn.ua", - "cr.ua", - "crimea.ua", - "cv.ua", - "dn.ua", - "dnepropetrovsk.ua", - "dnipropetrovsk.ua", - "dominic.ua", - "donetsk.ua", - "dp.ua", - "if.ua", - "ivano-frankivsk.ua", - "kh.ua", - "kharkiv.ua", - "kharkov.ua", - "kherson.ua", - "khmelnitskiy.ua", - "khmelnytskyi.ua", - "kiev.ua", - "kirovograd.ua", - "km.ua", - "kr.ua", - "krym.ua", - "ks.ua", - "kv.ua", - "kyiv.ua", - "lg.ua", - "lt.ua", - "lugansk.ua", - "lutsk.ua", - "lv.ua", - "lviv.ua", - "mk.ua", - "mykolaiv.ua", - "nikolaev.ua", - "od.ua", - "odesa.ua", - "odessa.ua", - "pl.ua", - "poltava.ua", - "rivne.ua", - "rovno.ua", - "rv.ua", - "sb.ua", - "sebastopol.ua", - "sevastopol.ua", - "sm.ua", - "sumy.ua", - "te.ua", - "ternopil.ua", - "uz.ua", - "uzhgorod.ua", - "vinnica.ua", - "vinnytsia.ua", - "vn.ua", - "volyn.ua", - "yalta.ua", - "zaporizhzhe.ua", - "zaporizhzhia.ua", - "zhitomir.ua", - "zhytomyr.ua", - "zp.ua", - "zt.ua", - "ug", - "co.ug", - "or.ug", - "ac.ug", - "sc.ug", - "go.ug", - "ne.ug", - "com.ug", - "org.ug", - "uk", - "ac.uk", - "co.uk", - "gov.uk", - "ltd.uk", - "me.uk", - "net.uk", - "nhs.uk", - "org.uk", - "plc.uk", - "police.uk", - "*.sch.uk", - "us", - "dni.us", - "fed.us", - "isa.us", - "kids.us", - "nsn.us", - "ak.us", - "al.us", - "ar.us", - "as.us", - "az.us", - "ca.us", - "co.us", - "ct.us", - "dc.us", - "de.us", - "fl.us", - "ga.us", - "gu.us", - "hi.us", - "ia.us", - "id.us", - "il.us", - "in.us", - "ks.us", - "ky.us", - "la.us", - "ma.us", - "md.us", - "me.us", - "mi.us", - "mn.us", - "mo.us", - "ms.us", - "mt.us", - "nc.us", - "nd.us", - "ne.us", - "nh.us", - "nj.us", - "nm.us", - "nv.us", - "ny.us", - "oh.us", - "ok.us", - "or.us", - "pa.us", - "pr.us", - "ri.us", - "sc.us", - "sd.us", - "tn.us", - "tx.us", - "ut.us", - "vi.us", - "vt.us", - "va.us", - "wa.us", - "wi.us", - "wv.us", - "wy.us", - "k12.ak.us", - "k12.al.us", - "k12.ar.us", - "k12.as.us", - 
"k12.az.us", - "k12.ca.us", - "k12.co.us", - "k12.ct.us", - "k12.dc.us", - "k12.de.us", - "k12.fl.us", - "k12.ga.us", - "k12.gu.us", - "k12.ia.us", - "k12.id.us", - "k12.il.us", - "k12.in.us", - "k12.ks.us", - "k12.ky.us", - "k12.la.us", - "k12.ma.us", - "k12.md.us", - "k12.me.us", - "k12.mi.us", - "k12.mn.us", - "k12.mo.us", - "k12.ms.us", - "k12.mt.us", - "k12.nc.us", - "k12.ne.us", - "k12.nh.us", - "k12.nj.us", - "k12.nm.us", - "k12.nv.us", - "k12.ny.us", - "k12.oh.us", - "k12.ok.us", - "k12.or.us", - "k12.pa.us", - "k12.pr.us", - "k12.ri.us", - "k12.sc.us", - "k12.tn.us", - "k12.tx.us", - "k12.ut.us", - "k12.vi.us", - "k12.vt.us", - "k12.va.us", - "k12.wa.us", - "k12.wi.us", - "k12.wy.us", - "cc.ak.us", - "cc.al.us", - "cc.ar.us", - "cc.as.us", - "cc.az.us", - "cc.ca.us", - "cc.co.us", - "cc.ct.us", - "cc.dc.us", - "cc.de.us", - "cc.fl.us", - "cc.ga.us", - "cc.gu.us", - "cc.hi.us", - "cc.ia.us", - "cc.id.us", - "cc.il.us", - "cc.in.us", - "cc.ks.us", - "cc.ky.us", - "cc.la.us", - "cc.ma.us", - "cc.md.us", - "cc.me.us", - "cc.mi.us", - "cc.mn.us", - "cc.mo.us", - "cc.ms.us", - "cc.mt.us", - "cc.nc.us", - "cc.nd.us", - "cc.ne.us", - "cc.nh.us", - "cc.nj.us", - "cc.nm.us", - "cc.nv.us", - "cc.ny.us", - "cc.oh.us", - "cc.ok.us", - "cc.or.us", - "cc.pa.us", - "cc.pr.us", - "cc.ri.us", - "cc.sc.us", - "cc.sd.us", - "cc.tn.us", - "cc.tx.us", - "cc.ut.us", - "cc.vi.us", - "cc.vt.us", - "cc.va.us", - "cc.wa.us", - "cc.wi.us", - "cc.wv.us", - "cc.wy.us", - "lib.ak.us", - "lib.al.us", - "lib.ar.us", - "lib.as.us", - "lib.az.us", - "lib.ca.us", - "lib.co.us", - "lib.ct.us", - "lib.dc.us", - "lib.fl.us", - "lib.ga.us", - "lib.gu.us", - "lib.hi.us", - "lib.ia.us", - "lib.id.us", - "lib.il.us", - "lib.in.us", - "lib.ks.us", - "lib.ky.us", - "lib.la.us", - "lib.ma.us", - "lib.md.us", - "lib.me.us", - "lib.mi.us", - "lib.mn.us", - "lib.mo.us", - "lib.ms.us", - "lib.mt.us", - "lib.nc.us", - "lib.nd.us", - "lib.ne.us", - "lib.nh.us", - "lib.nj.us", - "lib.nm.us", - "lib.nv.us", - "lib.ny.us", - "lib.oh.us", - "lib.ok.us", - "lib.or.us", - "lib.pa.us", - "lib.pr.us", - "lib.ri.us", - "lib.sc.us", - "lib.sd.us", - "lib.tn.us", - "lib.tx.us", - "lib.ut.us", - "lib.vi.us", - "lib.vt.us", - "lib.va.us", - "lib.wa.us", - "lib.wi.us", - "lib.wy.us", - "pvt.k12.ma.us", - "chtr.k12.ma.us", - "paroch.k12.ma.us", - "ann-arbor.mi.us", - "cog.mi.us", - "dst.mi.us", - "eaton.mi.us", - "gen.mi.us", - "mus.mi.us", - "tec.mi.us", - "washtenaw.mi.us", - "uy", - "com.uy", - "edu.uy", - "gub.uy", - "mil.uy", - "net.uy", - "org.uy", - "uz", - "co.uz", - "com.uz", - "net.uz", - "org.uz", - "va", - "vc", - "com.vc", - "net.vc", - "org.vc", - "gov.vc", - "mil.vc", - "edu.vc", - "ve", - "arts.ve", - "co.ve", - "com.ve", - "e12.ve", - "edu.ve", - "firm.ve", - "gob.ve", - "gov.ve", - "info.ve", - "int.ve", - "mil.ve", - "net.ve", - "org.ve", - "rec.ve", - "store.ve", - "tec.ve", - "web.ve", - "vg", - "vi", - "co.vi", - "com.vi", - "k12.vi", - "net.vi", - "org.vi", - "vn", - "com.vn", - "net.vn", - "org.vn", - "edu.vn", - "gov.vn", - "int.vn", - "ac.vn", - "biz.vn", - "info.vn", - "name.vn", - "pro.vn", - "health.vn", - "vu", - "com.vu", - "edu.vu", - "net.vu", - "org.vu", - "wf", - "ws", - "com.ws", - "net.ws", - "org.ws", - "gov.ws", - "edu.ws", - "yt", - "xn--mgbaam7a8h", - "xn--y9a3aq", - "xn--54b7fta0cc", - "xn--90ae", - "xn--90ais", - "xn--fiqs8s", - "xn--fiqz9s", - "xn--lgbbat1ad8j", - "xn--wgbh1c", - "xn--e1a4c", - "xn--node", - "xn--qxam", - "xn--j6w193g", - "xn--2scrj9c", - "xn--3hcrj9c", - "xn--45br5cyl", - 
"xn--h2breg3eve", - "xn--h2brj9c8c", - "xn--mgbgu82a", - "xn--rvc1e0am3e", - "xn--h2brj9c", - "xn--mgbbh1a71e", - "xn--fpcrj9c3d", - "xn--gecrj9c", - "xn--s9brj9c", - "xn--45brj9c", - "xn--xkc2dl3a5ee0h", - "xn--mgba3a4f16a", - "xn--mgba3a4fra", - "xn--mgbtx2b", - "xn--mgbayh7gpa", - "xn--3e0b707e", - "xn--80ao21a", - "xn--fzc2c9e2c", - "xn--xkc2al3hye2a", - "xn--mgbc0a9azcg", - "xn--d1alf", - "xn--l1acc", - "xn--mix891f", - "xn--mix082f", - "xn--mgbx4cd0ab", - "xn--mgb9awbf", - "xn--mgbai9azgqp6j", - "xn--mgbai9a5eva00b", - "xn--ygbi2ammx", - "xn--90a3ac", - "xn--o1ac.xn--90a3ac", - "xn--c1avg.xn--90a3ac", - "xn--90azh.xn--90a3ac", - "xn--d1at.xn--90a3ac", - "xn--o1ach.xn--90a3ac", - "xn--80au.xn--90a3ac", - "xn--p1ai", - "xn--wgbl6a", - "xn--mgberp4a5d4ar", - "xn--mgberp4a5d4a87g", - "xn--mgbqly7c0a67fbc", - "xn--mgbqly7cvafr", - "xn--mgbpl2fh", - "xn--yfro4i67o", - "xn--clchc0ea0b2g2a9gcd", - "xn--ogbpf8fl", - "xn--mgbtf8fl", - "xn--o3cw4h", - "xn--12c1fe0br.xn--o3cw4h", - "xn--12co0c3b4eva.xn--o3cw4h", - "xn--h3cuzk1di.xn--o3cw4h", - "xn--o3cyx2a.xn--o3cw4h", - "xn--m3ch0j3a.xn--o3cw4h", - "xn--12cfi8ixb8l.xn--o3cw4h", - "xn--pgbs0dh", - "xn--kpry57d", - "xn--kprw13d", - "xn--nnx388a", - "xn--j1amh", - "xn--mgb2ddes", - "xxx", - "*.ye", - "ac.za", - "agric.za", - "alt.za", - "co.za", - "edu.za", - "gov.za", - "grondar.za", - "law.za", - "mil.za", - "net.za", - "ngo.za", - "nis.za", - "nom.za", - "org.za", - "school.za", - "tm.za", - "web.za", - "zm", - "ac.zm", - "biz.zm", - "co.zm", - "com.zm", - "edu.zm", - "gov.zm", - "info.zm", - "mil.zm", - "net.zm", - "org.zm", - "sch.zm", - "zw", - "ac.zw", - "co.zw", - "gov.zw", - "mil.zw", - "org.zw", - "aaa", - "aarp", - "abarth", - "abb", - "abbott", - "abbvie", - "abc", - "able", - "abogado", - "abudhabi", - "academy", - "accenture", - "accountant", - "accountants", - "aco", - "active", - "actor", - "adac", - "ads", - "adult", - "aeg", - "aetna", - "afamilycompany", - "afl", - "africa", - "agakhan", - "agency", - "aig", - "aigo", - "airbus", - "airforce", - "airtel", - "akdn", - "alfaromeo", - "alibaba", - "alipay", - "allfinanz", - "allstate", - "ally", - "alsace", - "alstom", - "americanexpress", - "americanfamily", - "amex", - "amfam", - "amica", - "amsterdam", - "analytics", - "android", - "anquan", - "anz", - "aol", - "apartments", - "app", - "apple", - "aquarelle", - "arab", - "aramco", - "archi", - "army", - "art", - "arte", - "asda", - "associates", - "athleta", - "attorney", - "auction", - "audi", - "audible", - "audio", - "auspost", - "author", - "auto", - "autos", - "avianca", - "aws", - "axa", - "azure", - "baby", - "baidu", - "banamex", - "bananarepublic", - "band", - "bank", - "bar", - "barcelona", - "barclaycard", - "barclays", - "barefoot", - "bargains", - "baseball", - "basketball", - "bauhaus", - "bayern", - "bbc", - "bbt", - "bbva", - "bcg", - "bcn", - "beats", - "beauty", - "beer", - "bentley", - "berlin", - "best", - "bestbuy", - "bet", - "bharti", - "bible", - "bid", - "bike", - "bing", - "bingo", - "bio", - "black", - "blackfriday", - "blanco", - "blockbuster", - "blog", - "bloomberg", - "blue", - "bms", - "bmw", - "bnl", - "bnpparibas", - "boats", - "boehringer", - "bofa", - "bom", - "bond", - "boo", - "book", - "booking", - "boots", - "bosch", - "bostik", - "boston", - "bot", - "boutique", - "box", - "bradesco", - "bridgestone", - "broadway", - "broker", - "brother", - "brussels", - "budapest", - "bugatti", - "build", - "builders", - "business", - "buy", - "buzz", - "bzh", - "cab", - "cafe", - "cal", - "call", - 
"calvinklein", - "cam", - "camera", - "camp", - "cancerresearch", - "canon", - "capetown", - "capital", - "capitalone", - "car", - "caravan", - "cards", - "care", - "career", - "careers", - "cars", - "cartier", - "casa", - "case", - "caseih", - "cash", - "casino", - "catering", - "catholic", - "cba", - "cbn", - "cbre", - "cbs", - "ceb", - "center", - "ceo", - "cern", - "cfa", - "cfd", - "chanel", - "channel", - "chase", - "chat", - "cheap", - "chintai", - "chloe", - "christmas", - "chrome", - "chrysler", - "church", - "cipriani", - "circle", - "cisco", - "citadel", - "citi", - "citic", - "city", - "cityeats", - "claims", - "cleaning", - "click", - "clinic", - "clinique", - "clothing", - "cloud", - "club", - "clubmed", - "coach", - "codes", - "coffee", - "college", - "cologne", - "comcast", - "commbank", - "community", - "company", - "compare", - "computer", - "comsec", - "condos", - "construction", - "consulting", - "contact", - "contractors", - "cooking", - "cookingchannel", - "cool", - "corsica", - "country", - "coupon", - "coupons", - "courses", - "credit", - "creditcard", - "creditunion", - "cricket", - "crown", - "crs", - "cruise", - "cruises", - "csc", - "cuisinella", - "cymru", - "cyou", - "dabur", - "dad", - "dance", - "data", - "date", - "dating", - "datsun", - "day", - "dclk", - "dds", - "deal", - "dealer", - "deals", - "degree", - "delivery", - "dell", - "deloitte", - "delta", - "democrat", - "dental", - "dentist", - "desi", - "design", - "dev", - "dhl", - "diamonds", - "diet", - "digital", - "direct", - "directory", - "discount", - "discover", - "dish", - "diy", - "dnp", - "docs", - "doctor", - "dodge", - "dog", - "doha", - "domains", - "dot", - "download", - "drive", - "dtv", - "dubai", - "duck", - "dunlop", - "duns", - "dupont", - "durban", - "dvag", - "dvr", - "earth", - "eat", - "eco", - "edeka", - "education", - "email", - "emerck", - "energy", - "engineer", - "engineering", - "enterprises", - "epost", - "epson", - "equipment", - "ericsson", - "erni", - "esq", - "estate", - "esurance", - "etisalat", - "eurovision", - "eus", - "events", - "everbank", - "exchange", - "expert", - "exposed", - "express", - "extraspace", - "fage", - "fail", - "fairwinds", - "faith", - "family", - "fan", - "fans", - "farm", - "farmers", - "fashion", - "fast", - "fedex", - "feedback", - "ferrari", - "ferrero", - "fiat", - "fidelity", - "fido", - "film", - "final", - "finance", - "financial", - "fire", - "firestone", - "firmdale", - "fish", - "fishing", - "fit", - "fitness", - "flickr", - "flights", - "flir", - "florist", - "flowers", - "fly", - "foo", - "food", - "foodnetwork", - "football", - "ford", - "forex", - "forsale", - "forum", - "foundation", - "fox", - "free", - "fresenius", - "frl", - "frogans", - "frontdoor", - "frontier", - "ftr", - "fujitsu", - "fujixerox", - "fun", - "fund", - "furniture", - "futbol", - "fyi", - "gal", - "gallery", - "gallo", - "gallup", - "game", - "games", - "gap", - "garden", - "gbiz", - "gdn", - "gea", - "gent", - "genting", - "george", - "ggee", - "gift", - "gifts", - "gives", - "giving", - "glade", - "glass", - "gle", - "global", - "globo", - "gmail", - "gmbh", - "gmo", - "gmx", - "godaddy", - "gold", - "goldpoint", - "golf", - "goo", - "goodhands", - "goodyear", - "goog", - "google", - "gop", - "got", - "grainger", - "graphics", - "gratis", - "green", - "gripe", - "grocery", - "group", - "guardian", - "gucci", - "guge", - "guide", - "guitars", - "guru", - "hair", - "hamburg", - "hangout", - "haus", - "hbo", - "hdfc", - "hdfcbank", - "health", - "healthcare", 
- "help", - "helsinki", - "here", - "hermes", - "hgtv", - "hiphop", - "hisamitsu", - "hitachi", - "hiv", - "hkt", - "hockey", - "holdings", - "holiday", - "homedepot", - "homegoods", - "homes", - "homesense", - "honda", - "honeywell", - "horse", - "hospital", - "host", - "hosting", - "hot", - "hoteles", - "hotels", - "hotmail", - "house", - "how", - "hsbc", - "htc", - "hughes", - "hyatt", - "hyundai", - "ibm", - "icbc", - "ice", - "icu", - "ieee", - "ifm", - "ikano", - "imamat", - "imdb", - "immo", - "immobilien", - "industries", - "infiniti", - "ing", - "ink", - "institute", - "insurance", - "insure", - "intel", - "international", - "intuit", - "investments", - "ipiranga", - "irish", - "iselect", - "ismaili", - "ist", - "istanbul", - "itau", - "itv", - "iveco", - "iwc", - "jaguar", - "java", - "jcb", - "jcp", - "jeep", - "jetzt", - "jewelry", - "jio", - "jlc", - "jll", - "jmp", - "jnj", - "joburg", - "jot", - "joy", - "jpmorgan", - "jprs", - "juegos", - "juniper", - "kaufen", - "kddi", - "kerryhotels", - "kerrylogistics", - "kerryproperties", - "kfh", - "kia", - "kim", - "kinder", - "kindle", - "kitchen", - "kiwi", - "koeln", - "komatsu", - "kosher", - "kpmg", - "kpn", - "krd", - "kred", - "kuokgroup", - "kyoto", - "lacaixa", - "ladbrokes", - "lamborghini", - "lamer", - "lancaster", - "lancia", - "lancome", - "land", - "landrover", - "lanxess", - "lasalle", - "lat", - "latino", - "latrobe", - "law", - "lawyer", - "lds", - "lease", - "leclerc", - "lefrak", - "legal", - "lego", - "lexus", - "lgbt", - "liaison", - "lidl", - "life", - "lifeinsurance", - "lifestyle", - "lighting", - "like", - "lilly", - "limited", - "limo", - "lincoln", - "linde", - "link", - "lipsy", - "live", - "living", - "lixil", - "loan", - "loans", - "locker", - "locus", - "loft", - "lol", - "london", - "lotte", - "lotto", - "love", - "lpl", - "lplfinancial", - "ltd", - "ltda", - "lundbeck", - "lupin", - "luxe", - "luxury", - "macys", - "madrid", - "maif", - "maison", - "makeup", - "man", - "management", - "mango", - "map", - "market", - "marketing", - "markets", - "marriott", - "marshalls", - "maserati", - "mattel", - "mba", - "mcd", - "mcdonalds", - "mckinsey", - "med", - "media", - "meet", - "melbourne", - "meme", - "memorial", - "men", - "menu", - "meo", - "merckmsd", - "metlife", - "miami", - "microsoft", - "mini", - "mint", - "mit", - "mitsubishi", - "mlb", - "mls", - "mma", - "mobile", - "mobily", - "moda", - "moe", - "moi", - "mom", - "monash", - "money", - "monster", - "montblanc", - "mopar", - "mormon", - "mortgage", - "moscow", - "moto", - "motorcycles", - "mov", - "movie", - "movistar", - "msd", - "mtn", - "mtpc", - "mtr", - "mutual", - "nab", - "nadex", - "nagoya", - "nationwide", - "natura", - "navy", - "nba", - "nec", - "netbank", - "netflix", - "network", - "neustar", - "new", - "newholland", - "news", - "next", - "nextdirect", - "nexus", - "nfl", - "ngo", - "nhk", - "nico", - "nike", - "nikon", - "ninja", - "nissan", - "nissay", - "nokia", - "northwesternmutual", - "norton", - "now", - "nowruz", - "nowtv", - "nra", - "nrw", - "ntt", - "nyc", - "obi", - "observer", - "off", - "office", - "okinawa", - "olayan", - "olayangroup", - "oldnavy", - "ollo", - "omega", - "one", - "ong", - "onl", - "online", - "onyourside", - "ooo", - "open", - "oracle", - "orange", - "organic", - "origins", - "osaka", - "otsuka", - "ott", - "ovh", - "page", - "pamperedchef", - "panasonic", - "panerai", - "paris", - "pars", - "partners", - "parts", - "party", - "passagens", - "pay", - "pccw", - "pet", - "pfizer", - "pharmacy", - 
"phd", - "philips", - "phone", - "photo", - "photography", - "photos", - "physio", - "piaget", - "pics", - "pictet", - "pictures", - "pid", - "pin", - "ping", - "pink", - "pioneer", - "pizza", - "place", - "play", - "playstation", - "plumbing", - "plus", - "pnc", - "pohl", - "poker", - "politie", - "porn", - "pramerica", - "praxi", - "press", - "prime", - "prod", - "productions", - "prof", - "progressive", - "promo", - "properties", - "property", - "protection", - "pru", - "prudential", - "pub", - "pwc", - "qpon", - "quebec", - "quest", - "qvc", - "racing", - "radio", - "raid", - "read", - "realestate", - "realtor", - "realty", - "recipes", - "red", - "redstone", - "redumbrella", - "rehab", - "reise", - "reisen", - "reit", - "reliance", - "ren", - "rent", - "rentals", - "repair", - "report", - "republican", - "rest", - "restaurant", - "review", - "reviews", - "rexroth", - "rich", - "richardli", - "ricoh", - "rightathome", - "ril", - "rio", - "rip", - "rmit", - "rocher", - "rocks", - "rodeo", - "rogers", - "room", - "rsvp", - "rugby", - "ruhr", - "run", - "rwe", - "ryukyu", - "saarland", - "safe", - "safety", - "sakura", - "sale", - "salon", - "samsclub", - "samsung", - "sandvik", - "sandvikcoromant", - "sanofi", - "sap", - "sapo", - "sarl", - "sas", - "save", - "saxo", - "sbi", - "sbs", - "sca", - "scb", - "schaeffler", - "schmidt", - "scholarships", - "school", - "schule", - "schwarz", - "science", - "scjohnson", - "scor", - "scot", - "search", - "seat", - "secure", - "security", - "seek", - "select", - "sener", - "services", - "ses", - "seven", - "sew", - "sex", - "sexy", - "sfr", - "shangrila", - "sharp", - "shaw", - "shell", - "shia", - "shiksha", - "shoes", - "shop", - "shopping", - "shouji", - "show", - "showtime", - "shriram", - "silk", - "sina", - "singles", - "site", - "ski", - "skin", - "sky", - "skype", - "sling", - "smart", - "smile", - "sncf", - "soccer", - "social", - "softbank", - "software", - "sohu", - "solar", - "solutions", - "song", - "sony", - "soy", - "space", - "spiegel", - "spot", - "spreadbetting", - "srl", - "srt", - "stada", - "staples", - "star", - "starhub", - "statebank", - "statefarm", - "statoil", - "stc", - "stcgroup", - "stockholm", - "storage", - "store", - "stream", - "studio", - "study", - "style", - "sucks", - "supplies", - "supply", - "support", - "surf", - "surgery", - "suzuki", - "swatch", - "swiftcover", - "swiss", - "sydney", - "symantec", - "systems", - "tab", - "taipei", - "talk", - "taobao", - "target", - "tatamotors", - "tatar", - "tattoo", - "tax", - "taxi", - "tci", - "tdk", - "team", - "tech", - "technology", - "telecity", - "telefonica", - "temasek", - "tennis", - "teva", - "thd", - "theater", - "theatre", - "tiaa", - "tickets", - "tienda", - "tiffany", - "tips", - "tires", - "tirol", - "tjmaxx", - "tjx", - "tkmaxx", - "tmall", - "today", - "tokyo", - "tools", - "top", - "toray", - "toshiba", - "total", - "tours", - "town", - "toyota", - "toys", - "trade", - "trading", - "training", - "travelchannel", - "travelers", - "travelersinsurance", - "trust", - "trv", - "tube", - "tui", - "tunes", - "tushu", - "tvs", - "ubank", - "ubs", - "uconnect", - "unicom", - "university", - "uno", - "uol", - "ups", - "vacations", - "vana", - "vanguard", - "vegas", - "ventures", - "verisign", - "versicherung", - "vet", - "viajes", - "video", - "vig", - "viking", - "villas", - "vin", - "vip", - "virgin", - "visa", - "vision", - "vista", - "vistaprint", - "viva", - "vivo", - "vlaanderen", - "vodka", - "volkswagen", - "volvo", - "vote", - "voting", - "voto", - 
"voyage", - "vuelos", - "wales", - "walmart", - "walter", - "wang", - "wanggou", - "warman", - "watch", - "watches", - "weather", - "weatherchannel", - "webcam", - "weber", - "website", - "wed", - "wedding", - "weibo", - "weir", - "whoswho", - "wien", - "wiki", - "williamhill", - "win", - "windows", - "wine", - "winners", - "wme", - "wolterskluwer", - "woodside", - "work", - "works", - "world", - "wow", - "wtc", - "wtf", - "xbox", - "xerox", - "xfinity", - "xihuan", - "xin", - "xn--11b4c3d", - "xn--1ck2e1b", - "xn--1qqw23a", - "xn--30rr7y", - "xn--3bst00m", - "xn--3ds443g", - "xn--3oq18vl8pn36a", - "xn--3pxu8k", - "xn--42c2d9a", - "xn--45q11c", - "xn--4gbrim", - "xn--55qw42g", - "xn--55qx5d", - "xn--5su34j936bgsg", - "xn--5tzm5g", - "xn--6frz82g", - "xn--6qq986b3xl", - "xn--80adxhks", - "xn--80aqecdr1a", - "xn--80asehdb", - "xn--80aswg", - "xn--8y0a063a", - "xn--9dbq2a", - "xn--9et52u", - "xn--9krt00a", - "xn--b4w605ferd", - "xn--bck1b9a5dre4c", - "xn--c1avg", - "xn--c2br7g", - "xn--cck2b3b", - "xn--cg4bki", - "xn--czr694b", - "xn--czrs0t", - "xn--czru2d", - "xn--d1acj3b", - "xn--eckvdtc9d", - "xn--efvy88h", - "xn--estv75g", - "xn--fct429k", - "xn--fhbei", - "xn--fiq228c5hs", - "xn--fiq64b", - "xn--fjq720a", - "xn--flw351e", - "xn--fzys8d69uvgm", - "xn--g2xx48c", - "xn--gckr3f0f", - "xn--gk3at1e", - "xn--hxt814e", - "xn--i1b6b1a6a2e", - "xn--imr513n", - "xn--io0a7i", - "xn--j1aef", - "xn--jlq61u9w7b", - "xn--jvr189m", - "xn--kcrx77d1x4a", - "xn--kpu716f", - "xn--kput3i", - "xn--mgba3a3ejt", - "xn--mgba7c0bbn0a", - "xn--mgbaakc7dvf", - "xn--mgbab2bd", - "xn--mgbb9fbpob", - "xn--mgbca7dzdo", - "xn--mgbi4ecexp", - "xn--mgbt3dhd", - "xn--mk1bu44c", - "xn--mxtq1m", - "xn--ngbc5azd", - "xn--ngbe9e0a", - "xn--ngbrx", - "xn--nqv7f", - "xn--nqv7fs00ema", - "xn--nyqy26a", - "xn--p1acf", - "xn--pbt977c", - "xn--pssy2u", - "xn--q9jyb4c", - "xn--qcka1pmc", - "xn--rhqv96g", - "xn--rovu88b", - "xn--ses554g", - "xn--t60b56a", - "xn--tckwe", - "xn--tiq49xqyj", - "xn--unup4y", - "xn--vermgensberater-ctb", - "xn--vermgensberatung-pwb", - "xn--vhquv", - "xn--vuq861b", - "xn--w4r85el8fhu5dnra", - "xn--w4rs40l", - "xn--xhq521b", - "xn--zfr164b", - "xperia", - "xyz", - "yachts", - "yahoo", - "yamaxun", - "yandex", - "yodobashi", - "yoga", - "yokohama", - "you", - "youtube", - "yun", - "zappos", - "zara", - "zero", - "zip", - "zippo", - "zone", - "zuerich", - "cc.ua", - "inf.ua", - "ltd.ua", - "beep.pl", - "*.compute.estate", - "*.alces.network", - "*.alwaysdata.net", - "cloudfront.net", - "*.compute.amazonaws.com", - "*.compute-1.amazonaws.com", - "*.compute.amazonaws.com.cn", - "us-east-1.amazonaws.com", - "cn-north-1.eb.amazonaws.com.cn", - "elasticbeanstalk.com", - "ap-northeast-1.elasticbeanstalk.com", - "ap-northeast-2.elasticbeanstalk.com", - "ap-south-1.elasticbeanstalk.com", - "ap-southeast-1.elasticbeanstalk.com", - "ap-southeast-2.elasticbeanstalk.com", - "ca-central-1.elasticbeanstalk.com", - "eu-central-1.elasticbeanstalk.com", - "eu-west-1.elasticbeanstalk.com", - "eu-west-2.elasticbeanstalk.com", - "sa-east-1.elasticbeanstalk.com", - "us-east-1.elasticbeanstalk.com", - "us-east-2.elasticbeanstalk.com", - "us-gov-west-1.elasticbeanstalk.com", - "us-west-1.elasticbeanstalk.com", - "us-west-2.elasticbeanstalk.com", - "*.elb.amazonaws.com", - "*.elb.amazonaws.com.cn", - "s3.amazonaws.com", - "s3-ap-northeast-1.amazonaws.com", - "s3-ap-northeast-2.amazonaws.com", - "s3-ap-south-1.amazonaws.com", - "s3-ap-southeast-1.amazonaws.com", - "s3-ap-southeast-2.amazonaws.com", - "s3-ca-central-1.amazonaws.com", 
- "s3-eu-central-1.amazonaws.com", - "s3-eu-west-1.amazonaws.com", - "s3-eu-west-2.amazonaws.com", - "s3-external-1.amazonaws.com", - "s3-fips-us-gov-west-1.amazonaws.com", - "s3-sa-east-1.amazonaws.com", - "s3-us-gov-west-1.amazonaws.com", - "s3-us-east-2.amazonaws.com", - "s3-us-west-1.amazonaws.com", - "s3-us-west-2.amazonaws.com", - "s3.ap-northeast-2.amazonaws.com", - "s3.ap-south-1.amazonaws.com", - "s3.cn-north-1.amazonaws.com.cn", - "s3.ca-central-1.amazonaws.com", - "s3.eu-central-1.amazonaws.com", - "s3.eu-west-2.amazonaws.com", - "s3.us-east-2.amazonaws.com", - "s3.dualstack.ap-northeast-1.amazonaws.com", - "s3.dualstack.ap-northeast-2.amazonaws.com", - "s3.dualstack.ap-south-1.amazonaws.com", - "s3.dualstack.ap-southeast-1.amazonaws.com", - "s3.dualstack.ap-southeast-2.amazonaws.com", - "s3.dualstack.ca-central-1.amazonaws.com", - "s3.dualstack.eu-central-1.amazonaws.com", - "s3.dualstack.eu-west-1.amazonaws.com", - "s3.dualstack.eu-west-2.amazonaws.com", - "s3.dualstack.sa-east-1.amazonaws.com", - "s3.dualstack.us-east-1.amazonaws.com", - "s3.dualstack.us-east-2.amazonaws.com", - "s3-website-us-east-1.amazonaws.com", - "s3-website-us-west-1.amazonaws.com", - "s3-website-us-west-2.amazonaws.com", - "s3-website-ap-northeast-1.amazonaws.com", - "s3-website-ap-southeast-1.amazonaws.com", - "s3-website-ap-southeast-2.amazonaws.com", - "s3-website-eu-west-1.amazonaws.com", - "s3-website-sa-east-1.amazonaws.com", - "s3-website.ap-northeast-2.amazonaws.com", - "s3-website.ap-south-1.amazonaws.com", - "s3-website.ca-central-1.amazonaws.com", - "s3-website.eu-central-1.amazonaws.com", - "s3-website.eu-west-2.amazonaws.com", - "s3-website.us-east-2.amazonaws.com", - "t3l3p0rt.net", - "tele.amune.org", - "on-aptible.com", - "user.party.eus", - "pimienta.org", - "poivron.org", - "potager.org", - "sweetpepper.org", - "myasustor.com", - "myfritz.net", - "*.awdev.ca", - "*.advisor.ws", - "backplaneapp.io", - "betainabox.com", - "bnr.la", - "boomla.net", - "boxfuse.io", - "square7.ch", - "bplaced.com", - "bplaced.de", - "square7.de", - "bplaced.net", - "square7.net", - "browsersafetymark.io", - "mycd.eu", - "ae.org", - "ar.com", - "br.com", - "cn.com", - "com.de", - "com.se", - "de.com", - "eu.com", - "gb.com", - "gb.net", - "hu.com", - "hu.net", - "jp.net", - "jpn.com", - "kr.com", - "mex.com", - "no.com", - "qc.com", - "ru.com", - "sa.com", - "se.com", - "se.net", - "uk.com", - "uk.net", - "us.com", - "uy.com", - "za.bz", - "za.com", - "africa.com", - "gr.com", - "in.net", - "us.org", - "co.com", - "c.la", - "certmgr.org", - "xenapponazure.com", - "virtueeldomein.nl", - "c66.me", - "jdevcloud.com", - "wpdevcloud.com", - "cloudaccess.host", - "freesite.host", - "cloudaccess.net", - "cloudcontrolled.com", - "cloudcontrolapp.com", - "co.ca", - "co.cz", - "c.cdn77.org", - "cdn77-ssl.net", - "r.cdn77.net", - "rsc.cdn77.org", - "ssl.origin.cdn77-secure.org", - "cloudns.asia", - "cloudns.biz", - "cloudns.club", - "cloudns.cc", - "cloudns.eu", - "cloudns.in", - "cloudns.info", - "cloudns.org", - "cloudns.pro", - "cloudns.pw", - "cloudns.us", - "co.nl", - "co.no", - "dyn.cosidns.de", - "dynamisches-dns.de", - "dnsupdater.de", - "internet-dns.de", - "l-o-g-i-n.de", - "dynamic-dns.info", - "feste-ip.net", - "knx-server.net", - "static-access.net", - "realm.cz", - "*.cryptonomic.net", - "cupcake.is", - "cyon.link", - "cyon.site", - "daplie.me", - "localhost.daplie.me", - "biz.dk", - "co.dk", - "firm.dk", - "reg.dk", - "store.dk", - "debian.net", - "dedyn.io", - "dnshome.de", - "drayddns.com", - 
"dreamhosters.com", - "mydrobo.com", - "drud.io", - "drud.us", - "duckdns.org", - "dy.fi", - "tunk.org", - "dyndns-at-home.com", - "dyndns-at-work.com", - "dyndns-blog.com", - "dyndns-free.com", - "dyndns-home.com", - "dyndns-ip.com", - "dyndns-mail.com", - "dyndns-office.com", - "dyndns-pics.com", - "dyndns-remote.com", - "dyndns-server.com", - "dyndns-web.com", - "dyndns-wiki.com", - "dyndns-work.com", - "dyndns.biz", - "dyndns.info", - "dyndns.org", - "dyndns.tv", - "at-band-camp.net", - "ath.cx", - "barrel-of-knowledge.info", - "barrell-of-knowledge.info", - "better-than.tv", - "blogdns.com", - "blogdns.net", - "blogdns.org", - "blogsite.org", - "boldlygoingnowhere.org", - "broke-it.net", - "buyshouses.net", - "cechire.com", - "dnsalias.com", - "dnsalias.net", - "dnsalias.org", - "dnsdojo.com", - "dnsdojo.net", - "dnsdojo.org", - "does-it.net", - "doesntexist.com", - "doesntexist.org", - "dontexist.com", - "dontexist.net", - "dontexist.org", - "doomdns.com", - "doomdns.org", - "dvrdns.org", - "dyn-o-saur.com", - "dynalias.com", - "dynalias.net", - "dynalias.org", - "dynathome.net", - "dyndns.ws", - "endofinternet.net", - "endofinternet.org", - "endoftheinternet.org", - "est-a-la-maison.com", - "est-a-la-masion.com", - "est-le-patron.com", - "est-mon-blogueur.com", - "for-better.biz", - "for-more.biz", - "for-our.info", - "for-some.biz", - "for-the.biz", - "forgot.her.name", - "forgot.his.name", - "from-ak.com", - "from-al.com", - "from-ar.com", - "from-az.net", - "from-ca.com", - "from-co.net", - "from-ct.com", - "from-dc.com", - "from-de.com", - "from-fl.com", - "from-ga.com", - "from-hi.com", - "from-ia.com", - "from-id.com", - "from-il.com", - "from-in.com", - "from-ks.com", - "from-ky.com", - "from-la.net", - "from-ma.com", - "from-md.com", - "from-me.org", - "from-mi.com", - "from-mn.com", - "from-mo.com", - "from-ms.com", - "from-mt.com", - "from-nc.com", - "from-nd.com", - "from-ne.com", - "from-nh.com", - "from-nj.com", - "from-nm.com", - "from-nv.com", - "from-ny.net", - "from-oh.com", - "from-ok.com", - "from-or.com", - "from-pa.com", - "from-pr.com", - "from-ri.com", - "from-sc.com", - "from-sd.com", - "from-tn.com", - "from-tx.com", - "from-ut.com", - "from-va.com", - "from-vt.com", - "from-wa.com", - "from-wi.com", - "from-wv.com", - "from-wy.com", - "ftpaccess.cc", - "fuettertdasnetz.de", - "game-host.org", - "game-server.cc", - "getmyip.com", - "gets-it.net", - "go.dyndns.org", - "gotdns.com", - "gotdns.org", - "groks-the.info", - "groks-this.info", - "ham-radio-op.net", - "here-for-more.info", - "hobby-site.com", - "hobby-site.org", - "home.dyndns.org", - "homedns.org", - "homeftp.net", - "homeftp.org", - "homeip.net", - "homelinux.com", - "homelinux.net", - "homelinux.org", - "homeunix.com", - "homeunix.net", - "homeunix.org", - "iamallama.com", - "in-the-band.net", - "is-a-anarchist.com", - "is-a-blogger.com", - "is-a-bookkeeper.com", - "is-a-bruinsfan.org", - "is-a-bulls-fan.com", - "is-a-candidate.org", - "is-a-caterer.com", - "is-a-celticsfan.org", - "is-a-chef.com", - "is-a-chef.net", - "is-a-chef.org", - "is-a-conservative.com", - "is-a-cpa.com", - "is-a-cubicle-slave.com", - "is-a-democrat.com", - "is-a-designer.com", - "is-a-doctor.com", - "is-a-financialadvisor.com", - "is-a-geek.com", - "is-a-geek.net", - "is-a-geek.org", - "is-a-green.com", - "is-a-guru.com", - "is-a-hard-worker.com", - "is-a-hunter.com", - "is-a-knight.org", - "is-a-landscaper.com", - "is-a-lawyer.com", - "is-a-liberal.com", - "is-a-libertarian.com", - "is-a-linux-user.org", - 
"is-a-llama.com", - "is-a-musician.com", - "is-a-nascarfan.com", - "is-a-nurse.com", - "is-a-painter.com", - "is-a-patsfan.org", - "is-a-personaltrainer.com", - "is-a-photographer.com", - "is-a-player.com", - "is-a-republican.com", - "is-a-rockstar.com", - "is-a-socialist.com", - "is-a-soxfan.org", - "is-a-student.com", - "is-a-teacher.com", - "is-a-techie.com", - "is-a-therapist.com", - "is-an-accountant.com", - "is-an-actor.com", - "is-an-actress.com", - "is-an-anarchist.com", - "is-an-artist.com", - "is-an-engineer.com", - "is-an-entertainer.com", - "is-by.us", - "is-certified.com", - "is-found.org", - "is-gone.com", - "is-into-anime.com", - "is-into-cars.com", - "is-into-cartoons.com", - "is-into-games.com", - "is-leet.com", - "is-lost.org", - "is-not-certified.com", - "is-saved.org", - "is-slick.com", - "is-uberleet.com", - "is-very-bad.org", - "is-very-evil.org", - "is-very-good.org", - "is-very-nice.org", - "is-very-sweet.org", - "is-with-theband.com", - "isa-geek.com", - "isa-geek.net", - "isa-geek.org", - "isa-hockeynut.com", - "issmarterthanyou.com", - "isteingeek.de", - "istmein.de", - "kicks-ass.net", - "kicks-ass.org", - "knowsitall.info", - "land-4-sale.us", - "lebtimnetz.de", - "leitungsen.de", - "likes-pie.com", - "likescandy.com", - "merseine.nu", - "mine.nu", - "misconfused.org", - "mypets.ws", - "myphotos.cc", - "neat-url.com", - "office-on-the.net", - "on-the-web.tv", - "podzone.net", - "podzone.org", - "readmyblog.org", - "saves-the-whales.com", - "scrapper-site.net", - "scrapping.cc", - "selfip.biz", - "selfip.com", - "selfip.info", - "selfip.net", - "selfip.org", - "sells-for-less.com", - "sells-for-u.com", - "sells-it.net", - "sellsyourhome.org", - "servebbs.com", - "servebbs.net", - "servebbs.org", - "serveftp.net", - "serveftp.org", - "servegame.org", - "shacknet.nu", - "simple-url.com", - "space-to-rent.com", - "stuff-4-sale.org", - "stuff-4-sale.us", - "teaches-yoga.com", - "thruhere.net", - "traeumtgerade.de", - "webhop.biz", - "webhop.info", - "webhop.net", - "webhop.org", - "worse-than.tv", - "writesthisblog.com", - "ddnss.de", - "dyn.ddnss.de", - "dyndns.ddnss.de", - "dyndns1.de", - "dyn-ip24.de", - "home-webserver.de", - "dyn.home-webserver.de", - "myhome-server.de", - "ddnss.org", - "definima.net", - "definima.io", - "ddnsfree.com", - "ddnsgeek.com", - "giize.com", - "gleeze.com", - "kozow.com", - "loseyourip.com", - "ooguy.com", - "theworkpc.com", - "casacam.net", - "dynu.net", - "accesscam.org", - "camdvr.org", - "freeddns.org", - "mywire.org", - "webredirect.org", - "myddns.rocks", - "blogsite.xyz", - "dynv6.net", - "e4.cz", - "mytuleap.com", - "enonic.io", - "customer.enonic.io", - "eu.org", - "al.eu.org", - "asso.eu.org", - "at.eu.org", - "au.eu.org", - "be.eu.org", - "bg.eu.org", - "ca.eu.org", - "cd.eu.org", - "ch.eu.org", - "cn.eu.org", - "cy.eu.org", - "cz.eu.org", - "de.eu.org", - "dk.eu.org", - "edu.eu.org", - "ee.eu.org", - "es.eu.org", - "fi.eu.org", - "fr.eu.org", - "gr.eu.org", - "hr.eu.org", - "hu.eu.org", - "ie.eu.org", - "il.eu.org", - "in.eu.org", - "int.eu.org", - "is.eu.org", - "it.eu.org", - "jp.eu.org", - "kr.eu.org", - "lt.eu.org", - "lu.eu.org", - "lv.eu.org", - "mc.eu.org", - "me.eu.org", - "mk.eu.org", - "mt.eu.org", - "my.eu.org", - "net.eu.org", - "ng.eu.org", - "nl.eu.org", - "no.eu.org", - "nz.eu.org", - "paris.eu.org", - "pl.eu.org", - "pt.eu.org", - "q-a.eu.org", - "ro.eu.org", - "ru.eu.org", - "se.eu.org", - "si.eu.org", - "sk.eu.org", - "tr.eu.org", - "uk.eu.org", - "us.eu.org", - "eu-1.evennode.com", - 
"eu-2.evennode.com", - "eu-3.evennode.com", - "eu-4.evennode.com", - "us-1.evennode.com", - "us-2.evennode.com", - "us-3.evennode.com", - "us-4.evennode.com", - "twmail.cc", - "twmail.net", - "twmail.org", - "mymailer.com.tw", - "url.tw", - "apps.fbsbx.com", - "ru.net", - "adygeya.ru", - "bashkiria.ru", - "bir.ru", - "cbg.ru", - "com.ru", - "dagestan.ru", - "grozny.ru", - "kalmykia.ru", - "kustanai.ru", - "marine.ru", - "mordovia.ru", - "msk.ru", - "mytis.ru", - "nalchik.ru", - "nov.ru", - "pyatigorsk.ru", - "spb.ru", - "vladikavkaz.ru", - "vladimir.ru", - "abkhazia.su", - "adygeya.su", - "aktyubinsk.su", - "arkhangelsk.su", - "armenia.su", - "ashgabad.su", - "azerbaijan.su", - "balashov.su", - "bashkiria.su", - "bryansk.su", - "bukhara.su", - "chimkent.su", - "dagestan.su", - "east-kazakhstan.su", - "exnet.su", - "georgia.su", - "grozny.su", - "ivanovo.su", - "jambyl.su", - "kalmykia.su", - "kaluga.su", - "karacol.su", - "karaganda.su", - "karelia.su", - "khakassia.su", - "krasnodar.su", - "kurgan.su", - "kustanai.su", - "lenug.su", - "mangyshlak.su", - "mordovia.su", - "msk.su", - "murmansk.su", - "nalchik.su", - "navoi.su", - "north-kazakhstan.su", - "nov.su", - "obninsk.su", - "penza.su", - "pokrovsk.su", - "sochi.su", - "spb.su", - "tashkent.su", - "termez.su", - "togliatti.su", - "troitsk.su", - "tselinograd.su", - "tula.su", - "tuva.su", - "vladikavkaz.su", - "vladimir.su", - "vologda.su", - "channelsdvr.net", - "fastlylb.net", - "map.fastlylb.net", - "freetls.fastly.net", - "map.fastly.net", - "a.prod.fastly.net", - "global.prod.fastly.net", - "a.ssl.fastly.net", - "b.ssl.fastly.net", - "global.ssl.fastly.net", - "fhapp.xyz", - "fedorainfracloud.org", - "fedorapeople.org", - "cloud.fedoraproject.org", - "filegear.me", - "firebaseapp.com", - "flynnhub.com", - "flynnhosting.net", - "freebox-os.com", - "freeboxos.com", - "fbx-os.fr", - "fbxos.fr", - "freebox-os.fr", - "freeboxos.fr", - "myfusion.cloud", - "*.futurecms.at", - "futurehosting.at", - "futuremailing.at", - "*.ex.ortsinfo.at", - "*.kunden.ortsinfo.at", - "*.statics.cloud", - "service.gov.uk", - "github.io", - "githubusercontent.com", - "gitlab.io", - "homeoffice.gov.uk", - "ro.im", - "shop.ro", - "goip.de", - "*.0emm.com", - "appspot.com", - "blogspot.ae", - "blogspot.al", - "blogspot.am", - "blogspot.ba", - "blogspot.be", - "blogspot.bg", - "blogspot.bj", - "blogspot.ca", - "blogspot.cf", - "blogspot.ch", - "blogspot.cl", - "blogspot.co.at", - "blogspot.co.id", - "blogspot.co.il", - "blogspot.co.ke", - "blogspot.co.nz", - "blogspot.co.uk", - "blogspot.co.za", - "blogspot.com", - "blogspot.com.ar", - "blogspot.com.au", - "blogspot.com.br", - "blogspot.com.by", - "blogspot.com.co", - "blogspot.com.cy", - "blogspot.com.ee", - "blogspot.com.eg", - "blogspot.com.es", - "blogspot.com.mt", - "blogspot.com.ng", - "blogspot.com.tr", - "blogspot.com.uy", - "blogspot.cv", - "blogspot.cz", - "blogspot.de", - "blogspot.dk", - "blogspot.fi", - "blogspot.fr", - "blogspot.gr", - "blogspot.hk", - "blogspot.hr", - "blogspot.hu", - "blogspot.ie", - "blogspot.in", - "blogspot.is", - "blogspot.it", - "blogspot.jp", - "blogspot.kr", - "blogspot.li", - "blogspot.lt", - "blogspot.lu", - "blogspot.md", - "blogspot.mk", - "blogspot.mr", - "blogspot.mx", - "blogspot.my", - "blogspot.nl", - "blogspot.no", - "blogspot.pe", - "blogspot.pt", - "blogspot.qa", - "blogspot.re", - "blogspot.ro", - "blogspot.rs", - "blogspot.ru", - "blogspot.se", - "blogspot.sg", - "blogspot.si", - "blogspot.sk", - "blogspot.sn", - "blogspot.td", - "blogspot.tw", - 
"blogspot.ug", - "blogspot.vn", - "cloudfunctions.net", - "cloud.goog", - "codespot.com", - "googleapis.com", - "googlecode.com", - "pagespeedmobilizer.com", - "publishproxy.com", - "withgoogle.com", - "withyoutube.com", - "hashbang.sh", - "hasura-app.io", - "hepforge.org", - "herokuapp.com", - "herokussl.com", - "moonscale.net", - "iki.fi", - "biz.at", - "info.at", - "info.cx", - "ac.leg.br", - "al.leg.br", - "am.leg.br", - "ap.leg.br", - "ba.leg.br", - "ce.leg.br", - "df.leg.br", - "es.leg.br", - "go.leg.br", - "ma.leg.br", - "mg.leg.br", - "ms.leg.br", - "mt.leg.br", - "pa.leg.br", - "pb.leg.br", - "pe.leg.br", - "pi.leg.br", - "pr.leg.br", - "rj.leg.br", - "rn.leg.br", - "ro.leg.br", - "rr.leg.br", - "rs.leg.br", - "sc.leg.br", - "se.leg.br", - "sp.leg.br", - "to.leg.br", - "pixolino.com", - "ipifony.net", - "*.triton.zone", - "*.cns.joyent.com", - "js.org", - "keymachine.de", - "knightpoint.systems", - "co.krd", - "edu.krd", - "git-repos.de", - "lcube-server.de", - "svn-repos.de", - "we.bs", - "barsy.bg", - "barsyonline.com", - "barsy.de", - "barsy.eu", - "barsy.in", - "barsy.net", - "barsy.online", - "barsy.support", - "*.magentosite.cloud", - "hb.cldmail.ru", - "cloud.metacentrum.cz", - "custom.metacentrum.cz", - "meteorapp.com", - "eu.meteorapp.com", - "co.pl", - "azurewebsites.net", - "azure-mobile.net", - "cloudapp.net", - "bmoattachments.org", - "net.ru", - "org.ru", - "pp.ru", - "bitballoon.com", - "netlify.com", - "4u.com", - "ngrok.io", - "nfshost.com", - "nsupdate.info", - "nerdpol.ovh", - "blogsyte.com", - "brasilia.me", - "cable-modem.org", - "ciscofreak.com", - "collegefan.org", - "couchpotatofries.org", - "damnserver.com", - "ddns.me", - "ditchyourip.com", - "dnsfor.me", - "dnsiskinky.com", - "dvrcam.info", - "dynns.com", - "eating-organic.net", - "fantasyleague.cc", - "geekgalaxy.com", - "golffan.us", - "health-carereform.com", - "homesecuritymac.com", - "homesecuritypc.com", - "hopto.me", - "ilovecollege.info", - "loginto.me", - "mlbfan.org", - "mmafan.biz", - "myactivedirectory.com", - "mydissent.net", - "myeffect.net", - "mymediapc.net", - "mypsx.net", - "mysecuritycamera.com", - "mysecuritycamera.net", - "mysecuritycamera.org", - "net-freaks.com", - "nflfan.org", - "nhlfan.net", - "no-ip.ca", - "no-ip.co.uk", - "no-ip.net", - "noip.us", - "onthewifi.com", - "pgafan.net", - "point2this.com", - "pointto.us", - "privatizehealthinsurance.net", - "quicksytes.com", - "read-books.org", - "securitytactics.com", - "serveexchange.com", - "servehumour.com", - "servep2p.com", - "servesarcasm.com", - "stufftoread.com", - "ufcfan.org", - "unusualperson.com", - "workisboring.com", - "3utilities.com", - "bounceme.net", - "ddns.net", - "ddnsking.com", - "gotdns.ch", - "hopto.org", - "myftp.biz", - "myftp.org", - "myvnc.com", - "no-ip.biz", - "no-ip.info", - "no-ip.org", - "noip.me", - "redirectme.net", - "servebeer.com", - "serveblog.net", - "servecounterstrike.com", - "serveftp.com", - "servegame.com", - "servehalflife.com", - "servehttp.com", - "serveirc.com", - "serveminecraft.net", - "servemp3.com", - "servepics.com", - "servequake.com", - "sytes.net", - "webhop.me", - "zapto.org", - "stage.nodeart.io", - "nodum.co", - "nodum.io", - "nyc.mn", - "nom.ae", - "nom.ai", - "nom.al", - "nym.by", - "nym.bz", - "nom.cl", - "nom.gd", - "nom.gl", - "nym.gr", - "nom.gt", - "nom.hn", - "nom.im", - "nym.kz", - "nym.la", - "nom.li", - "nym.li", - "nym.lt", - "nym.lu", - "nym.me", - "nom.mk", - "nym.mx", - "nom.nu", - "nym.nz", - "nym.pe", - "nym.pt", - "nom.pw", - "nom.qa", - "nom.rs", - 
"nom.si", - "nym.sk", - "nym.su", - "nym.sx", - "nym.tw", - "nom.ug", - "nom.uy", - "nom.vc", - "nom.vg", - "cya.gg", - "nid.io", - "opencraft.hosting", - "operaunite.com", - "outsystemscloud.com", - "ownprovider.com", - "oy.lc", - "pgfog.com", - "pagefrontapp.com", - "art.pl", - "gliwice.pl", - "krakow.pl", - "poznan.pl", - "wroc.pl", - "zakopane.pl", - "pantheonsite.io", - "gotpantheon.com", - "mypep.link", - "on-web.fr", - "*.platform.sh", - "*.platformsh.site", - "xen.prgmr.com", - "priv.at", - "protonet.io", - "chirurgiens-dentistes-en-france.fr", - "byen.site", - "qa2.com", - "dev-myqnapcloud.com", - "alpha-myqnapcloud.com", - "myqnapcloud.com", - "*.quipelements.com", - "vapor.cloud", - "vaporcloud.io", - "rackmaze.com", - "rackmaze.net", - "rhcloud.com", - "hzc.io", - "wellbeingzone.eu", - "ptplus.fit", - "wellbeingzone.co.uk", - "sandcats.io", - "logoip.de", - "logoip.com", - "firewall-gateway.com", - "firewall-gateway.de", - "my-gateway.de", - "my-router.de", - "spdns.de", - "spdns.eu", - "firewall-gateway.net", - "my-firewall.org", - "myfirewall.org", - "spdns.org", - "*.sensiosite.cloud", - "biz.ua", - "co.ua", - "pp.ua", - "shiftedit.io", - "myshopblocks.com", - "1kapp.com", - "appchizi.com", - "applinzi.com", - "sinaapp.com", - "vipsinaapp.com", - "bounty-full.com", - "alpha.bounty-full.com", - "beta.bounty-full.com", - "static.land", - "dev.static.land", - "sites.static.land", - "apps.lair.io", - "*.stolos.io", - "spacekit.io", - "stackspace.space", - "storj.farm", - "temp-dns.com", - "diskstation.me", - "dscloud.biz", - "dscloud.me", - "dscloud.mobi", - "dsmynas.com", - "dsmynas.net", - "dsmynas.org", - "familyds.com", - "familyds.net", - "familyds.org", - "i234.me", - "myds.me", - "synology.me", - "vpnplus.to", - "taifun-dns.de", - "gda.pl", - "gdansk.pl", - "gdynia.pl", - "med.pl", - "sopot.pl", - "cust.dev.thingdust.io", - "cust.disrec.thingdust.io", - "cust.prod.thingdust.io", - "cust.testing.thingdust.io", - "bloxcms.com", - "townnews-staging.com", - "12hp.at", - "2ix.at", - "4lima.at", - "lima-city.at", - "12hp.ch", - "2ix.ch", - "4lima.ch", - "lima-city.ch", - "trafficplex.cloud", - "de.cool", - "12hp.de", - "2ix.de", - "4lima.de", - "lima-city.de", - "1337.pictures", - "clan.rip", - "lima-city.rocks", - "webspace.rocks", - "lima.zone", - "*.transurl.be", - "*.transurl.eu", - "*.transurl.nl", - "tuxfamily.org", - "dd-dns.de", - "diskstation.eu", - "diskstation.org", - "dray-dns.de", - "draydns.de", - "dyn-vpn.de", - "dynvpn.de", - "mein-vigor.de", - "my-vigor.de", - "my-wan.de", - "syno-ds.de", - "synology-diskstation.de", - "synology-ds.de", - "uber.space", - "hk.com", - "hk.org", - "ltd.hk", - "inc.hk", - "lib.de.us", - "router.management", - "v-info.info", - "wedeploy.io", - "wedeploy.me", - "wedeploy.sh", - "remotewd.com", - "wmflabs.org", - "cistron.nl", - "demon.nl", - "xs4all.space", - "yolasite.com", - "ybo.faith", - "yombo.me", - "homelink.one", - "ybo.party", - "ybo.review", - "ybo.science", - "ybo.trade", - "za.net", - "za.org", - "now.sh", -} - -var nodeLabels = [...]string{ - "aaa", - "aarp", - "abarth", - "abb", - "abbott", - "abbvie", - "abc", - "able", - "abogado", - "abudhabi", - "ac", - "academy", - "accenture", - "accountant", - "accountants", - "aco", - "active", - "actor", - "ad", - "adac", - "ads", - "adult", - "ae", - "aeg", - "aero", - "aetna", - "af", - "afamilycompany", - "afl", - "africa", - "ag", - "agakhan", - "agency", - "ai", - "aig", - "aigo", - "airbus", - "airforce", - "airtel", - "akdn", - "al", - "alfaromeo", - "alibaba", - 
"alipay", - "allfinanz", - "allstate", - "ally", - "alsace", - "alstom", - "am", - "americanexpress", - "americanfamily", - "amex", - "amfam", - "amica", - "amsterdam", - "analytics", - "android", - "anquan", - "anz", - "ao", - "aol", - "apartments", - "app", - "apple", - "aq", - "aquarelle", - "ar", - "arab", - "aramco", - "archi", - "army", - "arpa", - "art", - "arte", - "as", - "asda", - "asia", - "associates", - "at", - "athleta", - "attorney", - "au", - "auction", - "audi", - "audible", - "audio", - "auspost", - "author", - "auto", - "autos", - "avianca", - "aw", - "aws", - "ax", - "axa", - "az", - "azure", - "ba", - "baby", - "baidu", - "banamex", - "bananarepublic", - "band", - "bank", - "bar", - "barcelona", - "barclaycard", - "barclays", - "barefoot", - "bargains", - "baseball", - "basketball", - "bauhaus", - "bayern", - "bb", - "bbc", - "bbt", - "bbva", - "bcg", - "bcn", - "bd", - "be", - "beats", - "beauty", - "beer", - "bentley", - "berlin", - "best", - "bestbuy", - "bet", - "bf", - "bg", - "bh", - "bharti", - "bi", - "bible", - "bid", - "bike", - "bing", - "bingo", - "bio", - "biz", - "bj", - "black", - "blackfriday", - "blanco", - "blockbuster", - "blog", - "bloomberg", - "blue", - "bm", - "bms", - "bmw", - "bn", - "bnl", - "bnpparibas", - "bo", - "boats", - "boehringer", - "bofa", - "bom", - "bond", - "boo", - "book", - "booking", - "boots", - "bosch", - "bostik", - "boston", - "bot", - "boutique", - "box", - "br", - "bradesco", - "bridgestone", - "broadway", - "broker", - "brother", - "brussels", - "bs", - "bt", - "budapest", - "bugatti", - "build", - "builders", - "business", - "buy", - "buzz", - "bv", - "bw", - "by", - "bz", - "bzh", - "ca", - "cab", - "cafe", - "cal", - "call", - "calvinklein", - "cam", - "camera", - "camp", - "cancerresearch", - "canon", - "capetown", - "capital", - "capitalone", - "car", - "caravan", - "cards", - "care", - "career", - "careers", - "cars", - "cartier", - "casa", - "case", - "caseih", - "cash", - "casino", - "cat", - "catering", - "catholic", - "cba", - "cbn", - "cbre", - "cbs", - "cc", - "cd", - "ceb", - "center", - "ceo", - "cern", - "cf", - "cfa", - "cfd", - "cg", - "ch", - "chanel", - "channel", - "chase", - "chat", - "cheap", - "chintai", - "chloe", - "christmas", - "chrome", - "chrysler", - "church", - "ci", - "cipriani", - "circle", - "cisco", - "citadel", - "citi", - "citic", - "city", - "cityeats", - "ck", - "cl", - "claims", - "cleaning", - "click", - "clinic", - "clinique", - "clothing", - "cloud", - "club", - "clubmed", - "cm", - "cn", - "co", - "coach", - "codes", - "coffee", - "college", - "cologne", - "com", - "comcast", - "commbank", - "community", - "company", - "compare", - "computer", - "comsec", - "condos", - "construction", - "consulting", - "contact", - "contractors", - "cooking", - "cookingchannel", - "cool", - "coop", - "corsica", - "country", - "coupon", - "coupons", - "courses", - "cr", - "credit", - "creditcard", - "creditunion", - "cricket", - "crown", - "crs", - "cruise", - "cruises", - "csc", - "cu", - "cuisinella", - "cv", - "cw", - "cx", - "cy", - "cymru", - "cyou", - "cz", - "dabur", - "dad", - "dance", - "data", - "date", - "dating", - "datsun", - "day", - "dclk", - "dds", - "de", - "deal", - "dealer", - "deals", - "degree", - "delivery", - "dell", - "deloitte", - "delta", - "democrat", - "dental", - "dentist", - "desi", - "design", - "dev", - "dhl", - "diamonds", - "diet", - "digital", - "direct", - "directory", - "discount", - "discover", - "dish", - "diy", - "dj", - "dk", - "dm", - "dnp", - "do", - 
"docs", - "doctor", - "dodge", - "dog", - "doha", - "domains", - "dot", - "download", - "drive", - "dtv", - "dubai", - "duck", - "dunlop", - "duns", - "dupont", - "durban", - "dvag", - "dvr", - "dz", - "earth", - "eat", - "ec", - "eco", - "edeka", - "edu", - "education", - "ee", - "eg", - "email", - "emerck", - "energy", - "engineer", - "engineering", - "enterprises", - "epost", - "epson", - "equipment", - "er", - "ericsson", - "erni", - "es", - "esq", - "estate", - "esurance", - "et", - "etisalat", - "eu", - "eurovision", - "eus", - "events", - "everbank", - "exchange", - "expert", - "exposed", - "express", - "extraspace", - "fage", - "fail", - "fairwinds", - "faith", - "family", - "fan", - "fans", - "farm", - "farmers", - "fashion", - "fast", - "fedex", - "feedback", - "ferrari", - "ferrero", - "fi", - "fiat", - "fidelity", - "fido", - "film", - "final", - "finance", - "financial", - "fire", - "firestone", - "firmdale", - "fish", - "fishing", - "fit", - "fitness", - "fj", - "fk", - "flickr", - "flights", - "flir", - "florist", - "flowers", - "fly", - "fm", - "fo", - "foo", - "food", - "foodnetwork", - "football", - "ford", - "forex", - "forsale", - "forum", - "foundation", - "fox", - "fr", - "free", - "fresenius", - "frl", - "frogans", - "frontdoor", - "frontier", - "ftr", - "fujitsu", - "fujixerox", - "fun", - "fund", - "furniture", - "futbol", - "fyi", - "ga", - "gal", - "gallery", - "gallo", - "gallup", - "game", - "games", - "gap", - "garden", - "gb", - "gbiz", - "gd", - "gdn", - "ge", - "gea", - "gent", - "genting", - "george", - "gf", - "gg", - "ggee", - "gh", - "gi", - "gift", - "gifts", - "gives", - "giving", - "gl", - "glade", - "glass", - "gle", - "global", - "globo", - "gm", - "gmail", - "gmbh", - "gmo", - "gmx", - "gn", - "godaddy", - "gold", - "goldpoint", - "golf", - "goo", - "goodhands", - "goodyear", - "goog", - "google", - "gop", - "got", - "gov", - "gp", - "gq", - "gr", - "grainger", - "graphics", - "gratis", - "green", - "gripe", - "grocery", - "group", - "gs", - "gt", - "gu", - "guardian", - "gucci", - "guge", - "guide", - "guitars", - "guru", - "gw", - "gy", - "hair", - "hamburg", - "hangout", - "haus", - "hbo", - "hdfc", - "hdfcbank", - "health", - "healthcare", - "help", - "helsinki", - "here", - "hermes", - "hgtv", - "hiphop", - "hisamitsu", - "hitachi", - "hiv", - "hk", - "hkt", - "hm", - "hn", - "hockey", - "holdings", - "holiday", - "homedepot", - "homegoods", - "homes", - "homesense", - "honda", - "honeywell", - "horse", - "hospital", - "host", - "hosting", - "hot", - "hoteles", - "hotels", - "hotmail", - "house", - "how", - "hr", - "hsbc", - "ht", - "htc", - "hu", - "hughes", - "hyatt", - "hyundai", - "ibm", - "icbc", - "ice", - "icu", - "id", - "ie", - "ieee", - "ifm", - "ikano", - "il", - "im", - "imamat", - "imdb", - "immo", - "immobilien", - "in", - "industries", - "infiniti", - "info", - "ing", - "ink", - "institute", - "insurance", - "insure", - "int", - "intel", - "international", - "intuit", - "investments", - "io", - "ipiranga", - "iq", - "ir", - "irish", - "is", - "iselect", - "ismaili", - "ist", - "istanbul", - "it", - "itau", - "itv", - "iveco", - "iwc", - "jaguar", - "java", - "jcb", - "jcp", - "je", - "jeep", - "jetzt", - "jewelry", - "jio", - "jlc", - "jll", - "jm", - "jmp", - "jnj", - "jo", - "jobs", - "joburg", - "jot", - "joy", - "jp", - "jpmorgan", - "jprs", - "juegos", - "juniper", - "kaufen", - "kddi", - "ke", - "kerryhotels", - "kerrylogistics", - "kerryproperties", - "kfh", - "kg", - "kh", - "ki", - "kia", - "kim", - "kinder", - 
"kindle", - "kitchen", - "kiwi", - "km", - "kn", - "koeln", - "komatsu", - "kosher", - "kp", - "kpmg", - "kpn", - "kr", - "krd", - "kred", - "kuokgroup", - "kw", - "ky", - "kyoto", - "kz", - "la", - "lacaixa", - "ladbrokes", - "lamborghini", - "lamer", - "lancaster", - "lancia", - "lancome", - "land", - "landrover", - "lanxess", - "lasalle", - "lat", - "latino", - "latrobe", - "law", - "lawyer", - "lb", - "lc", - "lds", - "lease", - "leclerc", - "lefrak", - "legal", - "lego", - "lexus", - "lgbt", - "li", - "liaison", - "lidl", - "life", - "lifeinsurance", - "lifestyle", - "lighting", - "like", - "lilly", - "limited", - "limo", - "lincoln", - "linde", - "link", - "lipsy", - "live", - "living", - "lixil", - "lk", - "loan", - "loans", - "locker", - "locus", - "loft", - "lol", - "london", - "lotte", - "lotto", - "love", - "lpl", - "lplfinancial", - "lr", - "ls", - "lt", - "ltd", - "ltda", - "lu", - "lundbeck", - "lupin", - "luxe", - "luxury", - "lv", - "ly", - "ma", - "macys", - "madrid", - "maif", - "maison", - "makeup", - "man", - "management", - "mango", - "map", - "market", - "marketing", - "markets", - "marriott", - "marshalls", - "maserati", - "mattel", - "mba", - "mc", - "mcd", - "mcdonalds", - "mckinsey", - "md", - "me", - "med", - "media", - "meet", - "melbourne", - "meme", - "memorial", - "men", - "menu", - "meo", - "merckmsd", - "metlife", - "mg", - "mh", - "miami", - "microsoft", - "mil", - "mini", - "mint", - "mit", - "mitsubishi", - "mk", - "ml", - "mlb", - "mls", - "mm", - "mma", - "mn", - "mo", - "mobi", - "mobile", - "mobily", - "moda", - "moe", - "moi", - "mom", - "monash", - "money", - "monster", - "montblanc", - "mopar", - "mormon", - "mortgage", - "moscow", - "moto", - "motorcycles", - "mov", - "movie", - "movistar", - "mp", - "mq", - "mr", - "ms", - "msd", - "mt", - "mtn", - "mtpc", - "mtr", - "mu", - "museum", - "mutual", - "mv", - "mw", - "mx", - "my", - "mz", - "na", - "nab", - "nadex", - "nagoya", - "name", - "nationwide", - "natura", - "navy", - "nba", - "nc", - "ne", - "nec", - "net", - "netbank", - "netflix", - "network", - "neustar", - "new", - "newholland", - "news", - "next", - "nextdirect", - "nexus", - "nf", - "nfl", - "ng", - "ngo", - "nhk", - "ni", - "nico", - "nike", - "nikon", - "ninja", - "nissan", - "nissay", - "nl", - "no", - "nokia", - "northwesternmutual", - "norton", - "now", - "nowruz", - "nowtv", - "np", - "nr", - "nra", - "nrw", - "ntt", - "nu", - "nyc", - "nz", - "obi", - "observer", - "off", - "office", - "okinawa", - "olayan", - "olayangroup", - "oldnavy", - "ollo", - "om", - "omega", - "one", - "ong", - "onion", - "onl", - "online", - "onyourside", - "ooo", - "open", - "oracle", - "orange", - "org", - "organic", - "origins", - "osaka", - "otsuka", - "ott", - "ovh", - "pa", - "page", - "pamperedchef", - "panasonic", - "panerai", - "paris", - "pars", - "partners", - "parts", - "party", - "passagens", - "pay", - "pccw", - "pe", - "pet", - "pf", - "pfizer", - "pg", - "ph", - "pharmacy", - "phd", - "philips", - "phone", - "photo", - "photography", - "photos", - "physio", - "piaget", - "pics", - "pictet", - "pictures", - "pid", - "pin", - "ping", - "pink", - "pioneer", - "pizza", - "pk", - "pl", - "place", - "play", - "playstation", - "plumbing", - "plus", - "pm", - "pn", - "pnc", - "pohl", - "poker", - "politie", - "porn", - "post", - "pr", - "pramerica", - "praxi", - "press", - "prime", - "pro", - "prod", - "productions", - "prof", - "progressive", - "promo", - "properties", - "property", - "protection", - "pru", - "prudential", - "ps", - "pt", - 
"pub", - "pw", - "pwc", - "py", - "qa", - "qpon", - "quebec", - "quest", - "qvc", - "racing", - "radio", - "raid", - "re", - "read", - "realestate", - "realtor", - "realty", - "recipes", - "red", - "redstone", - "redumbrella", - "rehab", - "reise", - "reisen", - "reit", - "reliance", - "ren", - "rent", - "rentals", - "repair", - "report", - "republican", - "rest", - "restaurant", - "review", - "reviews", - "rexroth", - "rich", - "richardli", - "ricoh", - "rightathome", - "ril", - "rio", - "rip", - "rmit", - "ro", - "rocher", - "rocks", - "rodeo", - "rogers", - "room", - "rs", - "rsvp", - "ru", - "rugby", - "ruhr", - "run", - "rw", - "rwe", - "ryukyu", - "sa", - "saarland", - "safe", - "safety", - "sakura", - "sale", - "salon", - "samsclub", - "samsung", - "sandvik", - "sandvikcoromant", - "sanofi", - "sap", - "sapo", - "sarl", - "sas", - "save", - "saxo", - "sb", - "sbi", - "sbs", - "sc", - "sca", - "scb", - "schaeffler", - "schmidt", - "scholarships", - "school", - "schule", - "schwarz", - "science", - "scjohnson", - "scor", - "scot", - "sd", - "se", - "search", - "seat", - "secure", - "security", - "seek", - "select", - "sener", - "services", - "ses", - "seven", - "sew", - "sex", - "sexy", - "sfr", - "sg", - "sh", - "shangrila", - "sharp", - "shaw", - "shell", - "shia", - "shiksha", - "shoes", - "shop", - "shopping", - "shouji", - "show", - "showtime", - "shriram", - "si", - "silk", - "sina", - "singles", - "site", - "sj", - "sk", - "ski", - "skin", - "sky", - "skype", - "sl", - "sling", - "sm", - "smart", - "smile", - "sn", - "sncf", - "so", - "soccer", - "social", - "softbank", - "software", - "sohu", - "solar", - "solutions", - "song", - "sony", - "soy", - "space", - "spiegel", - "spot", - "spreadbetting", - "sr", - "srl", - "srt", - "st", - "stada", - "staples", - "star", - "starhub", - "statebank", - "statefarm", - "statoil", - "stc", - "stcgroup", - "stockholm", - "storage", - "store", - "stream", - "studio", - "study", - "style", - "su", - "sucks", - "supplies", - "supply", - "support", - "surf", - "surgery", - "suzuki", - "sv", - "swatch", - "swiftcover", - "swiss", - "sx", - "sy", - "sydney", - "symantec", - "systems", - "sz", - "tab", - "taipei", - "talk", - "taobao", - "target", - "tatamotors", - "tatar", - "tattoo", - "tax", - "taxi", - "tc", - "tci", - "td", - "tdk", - "team", - "tech", - "technology", - "tel", - "telecity", - "telefonica", - "temasek", - "tennis", - "teva", - "tf", - "tg", - "th", - "thd", - "theater", - "theatre", - "tiaa", - "tickets", - "tienda", - "tiffany", - "tips", - "tires", - "tirol", - "tj", - "tjmaxx", - "tjx", - "tk", - "tkmaxx", - "tl", - "tm", - "tmall", - "tn", - "to", - "today", - "tokyo", - "tools", - "top", - "toray", - "toshiba", - "total", - "tours", - "town", - "toyota", - "toys", - "tr", - "trade", - "trading", - "training", - "travel", - "travelchannel", - "travelers", - "travelersinsurance", - "trust", - "trv", - "tt", - "tube", - "tui", - "tunes", - "tushu", - "tv", - "tvs", - "tw", - "tz", - "ua", - "ubank", - "ubs", - "uconnect", - "ug", - "uk", - "unicom", - "university", - "uno", - "uol", - "ups", - "us", - "uy", - "uz", - "va", - "vacations", - "vana", - "vanguard", - "vc", - "ve", - "vegas", - "ventures", - "verisign", - "versicherung", - "vet", - "vg", - "vi", - "viajes", - "video", - "vig", - "viking", - "villas", - "vin", - "vip", - "virgin", - "visa", - "vision", - "vista", - "vistaprint", - "viva", - "vivo", - "vlaanderen", - "vn", - "vodka", - "volkswagen", - "volvo", - "vote", - "voting", - "voto", - "voyage", - "vu", - 
"vuelos", - "wales", - "walmart", - "walter", - "wang", - "wanggou", - "warman", - "watch", - "watches", - "weather", - "weatherchannel", - "webcam", - "weber", - "website", - "wed", - "wedding", - "weibo", - "weir", - "wf", - "whoswho", - "wien", - "wiki", - "williamhill", - "win", - "windows", - "wine", - "winners", - "wme", - "wolterskluwer", - "woodside", - "work", - "works", - "world", - "wow", - "ws", - "wtc", - "wtf", - "xbox", - "xerox", - "xfinity", - "xihuan", - "xin", - "xn--11b4c3d", - "xn--1ck2e1b", - "xn--1qqw23a", - "xn--2scrj9c", - "xn--30rr7y", - "xn--3bst00m", - "xn--3ds443g", - "xn--3e0b707e", - "xn--3hcrj9c", - "xn--3oq18vl8pn36a", - "xn--3pxu8k", - "xn--42c2d9a", - "xn--45br5cyl", - "xn--45brj9c", - "xn--45q11c", - "xn--4gbrim", - "xn--54b7fta0cc", - "xn--55qw42g", - "xn--55qx5d", - "xn--5su34j936bgsg", - "xn--5tzm5g", - "xn--6frz82g", - "xn--6qq986b3xl", - "xn--80adxhks", - "xn--80ao21a", - "xn--80aqecdr1a", - "xn--80asehdb", - "xn--80aswg", - "xn--8y0a063a", - "xn--90a3ac", - "xn--90ae", - "xn--90ais", - "xn--9dbq2a", - "xn--9et52u", - "xn--9krt00a", - "xn--b4w605ferd", - "xn--bck1b9a5dre4c", - "xn--c1avg", - "xn--c2br7g", - "xn--cck2b3b", - "xn--cg4bki", - "xn--clchc0ea0b2g2a9gcd", - "xn--czr694b", - "xn--czrs0t", - "xn--czru2d", - "xn--d1acj3b", - "xn--d1alf", - "xn--e1a4c", - "xn--eckvdtc9d", - "xn--efvy88h", - "xn--estv75g", - "xn--fct429k", - "xn--fhbei", - "xn--fiq228c5hs", - "xn--fiq64b", - "xn--fiqs8s", - "xn--fiqz9s", - "xn--fjq720a", - "xn--flw351e", - "xn--fpcrj9c3d", - "xn--fzc2c9e2c", - "xn--fzys8d69uvgm", - "xn--g2xx48c", - "xn--gckr3f0f", - "xn--gecrj9c", - "xn--gk3at1e", - "xn--h2breg3eve", - "xn--h2brj9c", - "xn--h2brj9c8c", - "xn--hxt814e", - "xn--i1b6b1a6a2e", - "xn--imr513n", - "xn--io0a7i", - "xn--j1aef", - "xn--j1amh", - "xn--j6w193g", - "xn--jlq61u9w7b", - "xn--jvr189m", - "xn--kcrx77d1x4a", - "xn--kprw13d", - "xn--kpry57d", - "xn--kpu716f", - "xn--kput3i", - "xn--l1acc", - "xn--lgbbat1ad8j", - "xn--mgb2ddes", - "xn--mgb9awbf", - "xn--mgba3a3ejt", - "xn--mgba3a4f16a", - "xn--mgba3a4fra", - "xn--mgba7c0bbn0a", - "xn--mgbaakc7dvf", - "xn--mgbaam7a8h", - "xn--mgbab2bd", - "xn--mgbai9a5eva00b", - "xn--mgbai9azgqp6j", - "xn--mgbayh7gpa", - "xn--mgbb9fbpob", - "xn--mgbbh1a71e", - "xn--mgbc0a9azcg", - "xn--mgbca7dzdo", - "xn--mgberp4a5d4a87g", - "xn--mgberp4a5d4ar", - "xn--mgbgu82a", - "xn--mgbi4ecexp", - "xn--mgbpl2fh", - "xn--mgbqly7c0a67fbc", - "xn--mgbqly7cvafr", - "xn--mgbt3dhd", - "xn--mgbtf8fl", - "xn--mgbtx2b", - "xn--mgbx4cd0ab", - "xn--mix082f", - "xn--mix891f", - "xn--mk1bu44c", - "xn--mxtq1m", - "xn--ngbc5azd", - "xn--ngbe9e0a", - "xn--ngbrx", - "xn--nnx388a", - "xn--node", - "xn--nqv7f", - "xn--nqv7fs00ema", - "xn--nyqy26a", - "xn--o3cw4h", - "xn--ogbpf8fl", - "xn--p1acf", - "xn--p1ai", - "xn--pbt977c", - "xn--pgbs0dh", - "xn--pssy2u", - "xn--q9jyb4c", - "xn--qcka1pmc", - "xn--qxam", - "xn--rhqv96g", - "xn--rovu88b", - "xn--rvc1e0am3e", - "xn--s9brj9c", - "xn--ses554g", - "xn--t60b56a", - "xn--tckwe", - "xn--tiq49xqyj", - "xn--unup4y", - "xn--vermgensberater-ctb", - "xn--vermgensberatung-pwb", - "xn--vhquv", - "xn--vuq861b", - "xn--w4r85el8fhu5dnra", - "xn--w4rs40l", - "xn--wgbh1c", - "xn--wgbl6a", - "xn--xhq521b", - "xn--xkc2al3hye2a", - "xn--xkc2dl3a5ee0h", - "xn--y9a3aq", - "xn--yfro4i67o", - "xn--ygbi2ammx", - "xn--zfr164b", - "xperia", - "xxx", - "xyz", - "yachts", - "yahoo", - "yamaxun", - "yandex", - "ye", - "yodobashi", - "yoga", - "yokohama", - "you", - "youtube", - "yt", - "yun", - "za", - "zappos", - "zara", - "zero", - "zip", 
- "zippo", - "zm", - "zone", - "zuerich", - "zw", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "nom", - "ac", - "blogspot", - "co", - "gov", - "mil", - "net", - "nom", - "org", - "sch", - "accident-investigation", - "accident-prevention", - "aerobatic", - "aeroclub", - "aerodrome", - "agents", - "air-surveillance", - "air-traffic-control", - "aircraft", - "airline", - "airport", - "airtraffic", - "ambulance", - "amusement", - "association", - "author", - "ballooning", - "broker", - "caa", - "cargo", - "catering", - "certification", - "championship", - "charter", - "civilaviation", - "club", - "conference", - "consultant", - "consulting", - "control", - "council", - "crew", - "design", - "dgca", - "educator", - "emergency", - "engine", - "engineer", - "entertainment", - "equipment", - "exchange", - "express", - "federation", - "flight", - "freight", - "fuel", - "gliding", - "government", - "groundhandling", - "group", - "hanggliding", - "homebuilt", - "insurance", - "journal", - "journalist", - "leasing", - "logistics", - "magazine", - "maintenance", - "media", - "microlight", - "modelling", - "navigation", - "parachuting", - "paragliding", - "passenger-association", - "pilot", - "press", - "production", - "recreation", - "repbody", - "res", - "research", - "rotorcraft", - "safety", - "scientist", - "services", - "show", - "skydiving", - "software", - "student", - "trader", - "trading", - "trainer", - "union", - "workinggroup", - "works", - "com", - "edu", - "gov", - "net", - "org", - "co", - "com", - "net", - "nom", - "org", - "com", - "net", - "nom", - "off", - "org", - "blogspot", - "com", - "edu", - "gov", - "mil", - "net", - "nom", - "org", - "blogspot", - "co", - "ed", - "gv", - "it", - "og", - "pb", - "com", - "edu", - "gob", - "gov", - "int", - "mil", - "musica", - "net", - "org", - "tur", - "blogspot", - "e164", - "in-addr", - "ip6", - "iris", - "uri", - "urn", - "gov", - "cloudns", - "12hp", - "2ix", - "4lima", - "ac", - "biz", - "co", - "futurecms", - "futurehosting", - "futuremailing", - "gv", - "info", - "lima-city", - "or", - "ortsinfo", - "priv", - "blogspot", - "ex", - "kunden", - "act", - "asn", - "com", - "conf", - "edu", - "gov", - "id", - "info", - "net", - "nsw", - "nt", - "org", - "oz", - "qld", - "sa", - "tas", - "vic", - "wa", - "blogspot", - "act", - "nsw", - "nt", - "qld", - "sa", - "tas", - "vic", - "wa", - "qld", - "sa", - "tas", - "vic", - "wa", - "com", - "biz", - "com", - "edu", - "gov", - "info", - "int", - "mil", - "name", - "net", - "org", - "pp", - "pro", - "blogspot", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "biz", - "co", - "com", - "edu", - "gov", - "info", - "net", - "org", - "store", - "tv", - "ac", - "blogspot", - "transurl", - "gov", - "0", - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - "a", - "b", - "barsy", - "blogspot", - "c", - "d", - "e", - "f", - "g", - "h", - "i", - "j", - "k", - "l", - "m", - "n", - "o", - "p", - "q", - "r", - "s", - "t", - "u", - "v", - "w", - "x", - "y", - "z", - "com", - "edu", - "gov", - "net", - "org", - "co", - "com", - "edu", - "or", - "org", - "cloudns", - "dscloud", - "dyndns", - "for-better", - "for-more", - "for-some", - "for-the", - "mmafan", - "myftp", - "no-ip", - "selfip", - "webhop", - "asso", - "barreau", - "blogspot", - "gouv", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gob", - "gov", - "int", - "mil", - "net", - "org", - "tv", - "adm", - "adv", - "agr", - "am", - "arq", - "art", - "ato", - "b", - "belem", - "bio", - "blog", - "bmd", - 
"cim", - "cng", - "cnt", - "com", - "coop", - "cri", - "def", - "ecn", - "eco", - "edu", - "emp", - "eng", - "esp", - "etc", - "eti", - "far", - "flog", - "floripa", - "fm", - "fnd", - "fot", - "fst", - "g12", - "ggf", - "gov", - "imb", - "ind", - "inf", - "jampa", - "jor", - "jus", - "leg", - "lel", - "mat", - "med", - "mil", - "mp", - "mus", - "net", - "nom", - "not", - "ntr", - "odo", - "org", - "poa", - "ppg", - "pro", - "psc", - "psi", - "qsl", - "radio", - "rec", - "recife", - "slg", - "srv", - "taxi", - "teo", - "tmp", - "trd", - "tur", - "tv", - "vet", - "vix", - "vlog", - "wiki", - "zlg", - "blogspot", - "ac", - "al", - "am", - "ap", - "ba", - "ce", - "df", - "es", - "go", - "ma", - "mg", - "ms", - "mt", - "pa", - "pb", - "pe", - "pi", - "pr", - "rj", - "rn", - "ro", - "rr", - "rs", - "sc", - "se", - "sp", - "to", - "ac", - "al", - "am", - "ap", - "ba", - "ce", - "df", - "es", - "go", - "ma", - "mg", - "ms", - "mt", - "pa", - "pb", - "pe", - "pi", - "pr", - "rj", - "rn", - "ro", - "rr", - "rs", - "sc", - "se", - "sp", - "to", - "com", - "edu", - "gov", - "net", - "org", - "we", - "com", - "edu", - "gov", - "net", - "org", - "co", - "org", - "com", - "gov", - "mil", - "nym", - "of", - "blogspot", - "com", - "edu", - "gov", - "net", - "nym", - "org", - "za", - "ab", - "awdev", - "bc", - "blogspot", - "co", - "gc", - "mb", - "nb", - "nf", - "nl", - "no-ip", - "ns", - "nt", - "nu", - "on", - "pe", - "qc", - "sk", - "yk", - "cloudns", - "fantasyleague", - "ftpaccess", - "game-server", - "myphotos", - "scrapping", - "twmail", - "gov", - "blogspot", - "12hp", - "2ix", - "4lima", - "blogspot", - "gotdns", - "lima-city", - "square7", - "ac", - "asso", - "co", - "com", - "ed", - "edu", - "go", - "gouv", - "int", - "md", - "net", - "or", - "org", - "presse", - "xn--aroport-bya", - "www", - "blogspot", - "co", - "gob", - "gov", - "mil", - "nom", - "magentosite", - "myfusion", - "sensiosite", - "statics", - "trafficplex", - "vapor", - "cloudns", - "co", - "com", - "gov", - "net", - "ac", - "ah", - "bj", - "com", - "cq", - "edu", - "fj", - "gd", - "gov", - "gs", - "gx", - "gz", - "ha", - "hb", - "he", - "hi", - "hk", - "hl", - "hn", - "jl", - "js", - "jx", - "ln", - "mil", - "mo", - "net", - "nm", - "nx", - "org", - "qh", - "sc", - "sd", - "sh", - "sn", - "sx", - "tj", - "tw", - "xj", - "xn--55qx5d", - "xn--io0a7i", - "xn--od0alg", - "xz", - "yn", - "zj", - "amazonaws", - "cn-north-1", - "compute", - "eb", - "elb", - "s3", - "cn-north-1", - "arts", - "com", - "edu", - "firm", - "gov", - "info", - "int", - "mil", - "net", - "nodum", - "nom", - "org", - "rec", - "web", - "blogspot", - "0emm", - "1kapp", - "3utilities", - "4u", - "africa", - "alpha-myqnapcloud", - "amazonaws", - "appchizi", - "applinzi", - "appspot", - "ar", - "barsyonline", - "betainabox", - "bitballoon", - "blogdns", - "blogspot", - "blogsyte", - "bloxcms", - "bounty-full", - "bplaced", - "br", - "cechire", - "ciscofreak", - "cloudcontrolapp", - "cloudcontrolled", - "cn", - "co", - "codespot", - "damnserver", - "ddnsfree", - "ddnsgeek", - "ddnsking", - "de", - "dev-myqnapcloud", - "ditchyourip", - "dnsalias", - "dnsdojo", - "dnsiskinky", - "doesntexist", - "dontexist", - "doomdns", - "drayddns", - "dreamhosters", - "dsmynas", - "dyn-o-saur", - "dynalias", - "dyndns-at-home", - "dyndns-at-work", - "dyndns-blog", - "dyndns-free", - "dyndns-home", - "dyndns-ip", - "dyndns-mail", - "dyndns-office", - "dyndns-pics", - "dyndns-remote", - "dyndns-server", - "dyndns-web", - "dyndns-wiki", - "dyndns-work", - "dynns", - 
"elasticbeanstalk", - "est-a-la-maison", - "est-a-la-masion", - "est-le-patron", - "est-mon-blogueur", - "eu", - "evennode", - "familyds", - "fbsbx", - "firebaseapp", - "firewall-gateway", - "flynnhub", - "freebox-os", - "freeboxos", - "from-ak", - "from-al", - "from-ar", - "from-ca", - "from-ct", - "from-dc", - "from-de", - "from-fl", - "from-ga", - "from-hi", - "from-ia", - "from-id", - "from-il", - "from-in", - "from-ks", - "from-ky", - "from-ma", - "from-md", - "from-mi", - "from-mn", - "from-mo", - "from-ms", - "from-mt", - "from-nc", - "from-nd", - "from-ne", - "from-nh", - "from-nj", - "from-nm", - "from-nv", - "from-oh", - "from-ok", - "from-or", - "from-pa", - "from-pr", - "from-ri", - "from-sc", - "from-sd", - "from-tn", - "from-tx", - "from-ut", - "from-va", - "from-vt", - "from-wa", - "from-wi", - "from-wv", - "from-wy", - "gb", - "geekgalaxy", - "getmyip", - "giize", - "githubusercontent", - "gleeze", - "googleapis", - "googlecode", - "gotdns", - "gotpantheon", - "gr", - "health-carereform", - "herokuapp", - "herokussl", - "hk", - "hobby-site", - "homelinux", - "homesecuritymac", - "homesecuritypc", - "homeunix", - "hu", - "iamallama", - "is-a-anarchist", - "is-a-blogger", - "is-a-bookkeeper", - "is-a-bulls-fan", - "is-a-caterer", - "is-a-chef", - "is-a-conservative", - "is-a-cpa", - "is-a-cubicle-slave", - "is-a-democrat", - "is-a-designer", - "is-a-doctor", - "is-a-financialadvisor", - "is-a-geek", - "is-a-green", - "is-a-guru", - "is-a-hard-worker", - "is-a-hunter", - "is-a-landscaper", - "is-a-lawyer", - "is-a-liberal", - "is-a-libertarian", - "is-a-llama", - "is-a-musician", - "is-a-nascarfan", - "is-a-nurse", - "is-a-painter", - "is-a-personaltrainer", - "is-a-photographer", - "is-a-player", - "is-a-republican", - "is-a-rockstar", - "is-a-socialist", - "is-a-student", - "is-a-teacher", - "is-a-techie", - "is-a-therapist", - "is-an-accountant", - "is-an-actor", - "is-an-actress", - "is-an-anarchist", - "is-an-artist", - "is-an-engineer", - "is-an-entertainer", - "is-certified", - "is-gone", - "is-into-anime", - "is-into-cars", - "is-into-cartoons", - "is-into-games", - "is-leet", - "is-not-certified", - "is-slick", - "is-uberleet", - "is-with-theband", - "isa-geek", - "isa-hockeynut", - "issmarterthanyou", - "jdevcloud", - "joyent", - "jpn", - "kozow", - "kr", - "likes-pie", - "likescandy", - "logoip", - "loseyourip", - "meteorapp", - "mex", - "myactivedirectory", - "myasustor", - "mydrobo", - "myqnapcloud", - "mysecuritycamera", - "myshopblocks", - "mytuleap", - "myvnc", - "neat-url", - "net-freaks", - "netlify", - "nfshost", - "no", - "on-aptible", - "onthewifi", - "ooguy", - "operaunite", - "outsystemscloud", - "ownprovider", - "pagefrontapp", - "pagespeedmobilizer", - "pgfog", - "pixolino", - "point2this", - "prgmr", - "publishproxy", - "qa2", - "qc", - "quicksytes", - "quipelements", - "rackmaze", - "remotewd", - "rhcloud", - "ru", - "sa", - "saves-the-whales", - "se", - "securitytactics", - "selfip", - "sells-for-less", - "sells-for-u", - "servebbs", - "servebeer", - "servecounterstrike", - "serveexchange", - "serveftp", - "servegame", - "servehalflife", - "servehttp", - "servehumour", - "serveirc", - "servemp3", - "servep2p", - "servepics", - "servequake", - "servesarcasm", - "simple-url", - "sinaapp", - "space-to-rent", - "stufftoread", - "teaches-yoga", - "temp-dns", - "theworkpc", - "townnews-staging", - "uk", - "unusualperson", - "us", - "uy", - "vipsinaapp", - "withgoogle", - "withyoutube", - "workisboring", - "wpdevcloud", - "writesthisblog", - 
"xenapponazure", - "yolasite", - "za", - "ap-northeast-1", - "ap-northeast-2", - "ap-south-1", - "ap-southeast-1", - "ap-southeast-2", - "ca-central-1", - "compute", - "compute-1", - "elb", - "eu-central-1", - "eu-west-1", - "eu-west-2", - "s3", - "s3-ap-northeast-1", - "s3-ap-northeast-2", - "s3-ap-south-1", - "s3-ap-southeast-1", - "s3-ap-southeast-2", - "s3-ca-central-1", - "s3-eu-central-1", - "s3-eu-west-1", - "s3-eu-west-2", - "s3-external-1", - "s3-fips-us-gov-west-1", - "s3-sa-east-1", - "s3-us-east-2", - "s3-us-gov-west-1", - "s3-us-west-1", - "s3-us-west-2", - "s3-website-ap-northeast-1", - "s3-website-ap-southeast-1", - "s3-website-ap-southeast-2", - "s3-website-eu-west-1", - "s3-website-sa-east-1", - "s3-website-us-east-1", - "s3-website-us-west-1", - "s3-website-us-west-2", - "sa-east-1", - "us-east-1", - "us-east-2", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "dualstack", - "s3", - "s3-website", - "s3", - "alpha", - "beta", - "ap-northeast-1", - "ap-northeast-2", - "ap-south-1", - "ap-southeast-1", - "ap-southeast-2", - "ca-central-1", - "eu-central-1", - "eu-west-1", - "eu-west-2", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-west-1", - "us-west-1", - "us-west-2", - "eu-1", - "eu-2", - "eu-3", - "eu-4", - "us-1", - "us-2", - "us-3", - "us-4", - "apps", - "cns", - "eu", - "xen", - "de", - "ac", - "co", - "ed", - "fi", - "go", - "or", - "sa", - "com", - "edu", - "gov", - "inf", - "net", - "org", - "blogspot", - "com", - "edu", - "net", - "org", - "ath", - "gov", - "info", - "ac", - "biz", - "com", - "ekloges", - "gov", - "ltd", - "name", - "net", - "org", - "parliament", - "press", - "pro", - "tm", - "blogspot", - "blogspot", - "co", - "e4", - "metacentrum", - "realm", - "cloud", - "custom", - "12hp", - "2ix", - "4lima", - "barsy", - "blogspot", - "bplaced", - "com", - "cosidns", - "dd-dns", - "ddnss", - "dnshome", - "dnsupdater", - "dray-dns", - "draydns", - "dyn-ip24", - "dyn-vpn", - "dynamisches-dns", - "dyndns1", - "dynvpn", - "firewall-gateway", - "fuettertdasnetz", - "git-repos", - "goip", - "home-webserver", - "internet-dns", - "isteingeek", - "istmein", - "keymachine", - "l-o-g-i-n", - "lcube-server", - "lebtimnetz", - "leitungsen", - "lima-city", - "logoip", - "mein-vigor", - "my-gateway", - "my-router", - "my-vigor", - "my-wan", - "myhome-server", - "spdns", - "square7", - "svn-repos", - "syno-ds", - "synology-diskstation", - "synology-ds", - "taifun-dns", - "traeumtgerade", - "dyn", - "dyn", - "dyndns", - "dyn", - "biz", - "blogspot", - "co", - "firm", - "reg", - "store", - "com", - "edu", - "gov", - "net", - "org", - "art", - "com", - "edu", - "gob", - "gov", - "mil", - "net", - "org", - "sld", - "web", - "art", - "asso", - "com", - "edu", - "gov", - "net", - "org", - "pol", - "com", - "edu", - "fin", - "gob", - "gov", - "info", - "k12", - "med", - "mil", - "net", - "org", - "pro", - "aip", - "com", - "edu", - "fie", - "gov", - "lib", - "med", - "org", - "pri", - "riik", - "blogspot", - "com", - "edu", - "eun", - "gov", - "mil", - "name", - "net", - "org", - "sci", - "blogspot", - "com", - "edu", - "gob", - "nom", - "org", - "blogspot", - "compute", - "biz", - "com", - "edu", - "gov", - "info", - "name", - "net", - "org", - 
"barsy", - "cloudns", - "diskstation", - "mycd", - "spdns", - "transurl", - "wellbeingzone", - "party", - "user", - "ybo", - "storj", - "aland", - "blogspot", - "dy", - "iki", - "ptplus", - "aeroport", - "assedic", - "asso", - "avocat", - "avoues", - "blogspot", - "cci", - "chambagri", - "chirurgiens-dentistes", - "chirurgiens-dentistes-en-france", - "com", - "experts-comptables", - "fbx-os", - "fbxos", - "freebox-os", - "freeboxos", - "geometre-expert", - "gouv", - "greta", - "huissier-justice", - "medecin", - "nom", - "notaires", - "on-web", - "pharmacien", - "port", - "prd", - "presse", - "tm", - "veterinaire", - "nom", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "pvt", - "co", - "cya", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "org", - "com", - "edu", - "gov", - "ltd", - "mod", - "org", - "co", - "com", - "edu", - "net", - "nom", - "org", - "ac", - "com", - "edu", - "gov", - "net", - "org", - "cloud", - "asso", - "com", - "edu", - "mobi", - "net", - "org", - "blogspot", - "com", - "edu", - "gov", - "net", - "nym", - "org", - "com", - "edu", - "gob", - "ind", - "mil", - "net", - "nom", - "org", - "co", - "com", - "edu", - "gov", - "net", - "org", - "blogspot", - "com", - "edu", - "gov", - "idv", - "inc", - "ltd", - "net", - "org", - "xn--55qx5d", - "xn--ciqpn", - "xn--gmq050i", - "xn--gmqw5a", - "xn--io0a7i", - "xn--lcvr32d", - "xn--mk0axi", - "xn--mxtq1m", - "xn--od0alg", - "xn--od0aq3b", - "xn--tn0ag", - "xn--uc0atv", - "xn--uc0ay4a", - "xn--wcvs22d", - "xn--zf0avx", - "com", - "edu", - "gob", - "mil", - "net", - "nom", - "org", - "cloudaccess", - "freesite", - "opencraft", - "blogspot", - "com", - "from", - "iz", - "name", - "adult", - "art", - "asso", - "com", - "coop", - "edu", - "firm", - "gouv", - "info", - "med", - "net", - "org", - "perso", - "pol", - "pro", - "rel", - "shop", - "2000", - "agrar", - "blogspot", - "bolt", - "casino", - "city", - "co", - "erotica", - "erotika", - "film", - "forum", - "games", - "hotel", - "info", - "ingatlan", - "jogasz", - "konyvelo", - "lakas", - "media", - "news", - "org", - "priv", - "reklam", - "sex", - "shop", - "sport", - "suli", - "szex", - "tm", - "tozsde", - "utazas", - "video", - "ac", - "biz", - "co", - "desa", - "go", - "mil", - "my", - "net", - "or", - "sch", - "web", - "blogspot", - "blogspot", - "gov", - "ac", - "co", - "gov", - "idf", - "k12", - "muni", - "net", - "org", - "blogspot", - "ac", - "co", - "com", - "net", - "nom", - "org", - "ro", - "tt", - "tv", - "ltd", - "plc", - "ac", - "barsy", - "blogspot", - "cloudns", - "co", - "edu", - "firm", - "gen", - "gov", - "ind", - "mil", - "net", - "nic", - "org", - "res", - "barrel-of-knowledge", - "barrell-of-knowledge", - "cloudns", - "dvrcam", - "dynamic-dns", - "dyndns", - "for-our", - "groks-the", - "groks-this", - "here-for-more", - "ilovecollege", - "knowsitall", - "no-ip", - "nsupdate", - "selfip", - "v-info", - "webhop", - "eu", - "backplaneapp", - "boxfuse", - "browsersafetymark", - "com", - "dedyn", - "definima", - "drud", - "enonic", - "github", - "gitlab", - "hasura-app", - "hzc", - "lair", - "ngrok", - "nid", - "nodeart", - "nodum", - "pantheonsite", - "protonet", - "sandcats", - "shiftedit", - "spacekit", - "stolos", - "thingdust", - "vaporcloud", - "wedeploy", - "customer", - "apps", - "stage", - "dev", - "disrec", - "prod", - "testing", - "cust", - "cust", - "cust", - "cust", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "ac", - "co", - "gov", - "id", - "net", - "org", - "sch", - "xn--mgba3a4f16a", - "xn--mgba3a4fra", - 
"blogspot", - "com", - "cupcake", - "edu", - "gov", - "int", - "net", - "org", - "abr", - "abruzzo", - "ag", - "agrigento", - "al", - "alessandria", - "alto-adige", - "altoadige", - "an", - "ancona", - "andria-barletta-trani", - "andria-trani-barletta", - "andriabarlettatrani", - "andriatranibarletta", - "ao", - "aosta", - "aosta-valley", - "aostavalley", - "aoste", - "ap", - "aq", - "aquila", - "ar", - "arezzo", - "ascoli-piceno", - "ascolipiceno", - "asti", - "at", - "av", - "avellino", - "ba", - "balsan", - "bari", - "barletta-trani-andria", - "barlettatraniandria", - "bas", - "basilicata", - "belluno", - "benevento", - "bergamo", - "bg", - "bi", - "biella", - "bl", - "blogspot", - "bn", - "bo", - "bologna", - "bolzano", - "bozen", - "br", - "brescia", - "brindisi", - "bs", - "bt", - "bz", - "ca", - "cagliari", - "cal", - "calabria", - "caltanissetta", - "cam", - "campania", - "campidano-medio", - "campidanomedio", - "campobasso", - "carbonia-iglesias", - "carboniaiglesias", - "carrara-massa", - "carraramassa", - "caserta", - "catania", - "catanzaro", - "cb", - "ce", - "cesena-forli", - "cesenaforli", - "ch", - "chieti", - "ci", - "cl", - "cn", - "co", - "como", - "cosenza", - "cr", - "cremona", - "crotone", - "cs", - "ct", - "cuneo", - "cz", - "dell-ogliastra", - "dellogliastra", - "edu", - "emilia-romagna", - "emiliaromagna", - "emr", - "en", - "enna", - "fc", - "fe", - "fermo", - "ferrara", - "fg", - "fi", - "firenze", - "florence", - "fm", - "foggia", - "forli-cesena", - "forlicesena", - "fr", - "friuli-v-giulia", - "friuli-ve-giulia", - "friuli-vegiulia", - "friuli-venezia-giulia", - "friuli-veneziagiulia", - "friuli-vgiulia", - "friuliv-giulia", - "friulive-giulia", - "friulivegiulia", - "friulivenezia-giulia", - "friuliveneziagiulia", - "friulivgiulia", - "frosinone", - "fvg", - "ge", - "genoa", - "genova", - "go", - "gorizia", - "gov", - "gr", - "grosseto", - "iglesias-carbonia", - "iglesiascarbonia", - "im", - "imperia", - "is", - "isernia", - "kr", - "la-spezia", - "laquila", - "laspezia", - "latina", - "laz", - "lazio", - "lc", - "le", - "lecce", - "lecco", - "li", - "lig", - "liguria", - "livorno", - "lo", - "lodi", - "lom", - "lombardia", - "lombardy", - "lt", - "lu", - "lucania", - "lucca", - "macerata", - "mantova", - "mar", - "marche", - "massa-carrara", - "massacarrara", - "matera", - "mb", - "mc", - "me", - "medio-campidano", - "mediocampidano", - "messina", - "mi", - "milan", - "milano", - "mn", - "mo", - "modena", - "mol", - "molise", - "monza", - "monza-brianza", - "monza-e-della-brianza", - "monzabrianza", - "monzaebrianza", - "monzaedellabrianza", - "ms", - "mt", - "na", - "naples", - "napoli", - "no", - "novara", - "nu", - "nuoro", - "og", - "ogliastra", - "olbia-tempio", - "olbiatempio", - "or", - "oristano", - "ot", - "pa", - "padova", - "padua", - "palermo", - "parma", - "pavia", - "pc", - "pd", - "pe", - "perugia", - "pesaro-urbino", - "pesarourbino", - "pescara", - "pg", - "pi", - "piacenza", - "piedmont", - "piemonte", - "pisa", - "pistoia", - "pmn", - "pn", - "po", - "pordenone", - "potenza", - "pr", - "prato", - "pt", - "pu", - "pug", - "puglia", - "pv", - "pz", - "ra", - "ragusa", - "ravenna", - "rc", - "re", - "reggio-calabria", - "reggio-emilia", - "reggiocalabria", - "reggioemilia", - "rg", - "ri", - "rieti", - "rimini", - "rm", - "rn", - "ro", - "roma", - "rome", - "rovigo", - "sa", - "salerno", - "sar", - "sardegna", - "sardinia", - "sassari", - "savona", - "si", - "sic", - "sicilia", - "sicily", - "siena", - "siracusa", - "so", - "sondrio", - "sp", 
- "sr", - "ss", - "suedtirol", - "sv", - "ta", - "taa", - "taranto", - "te", - "tempio-olbia", - "tempioolbia", - "teramo", - "terni", - "tn", - "to", - "torino", - "tos", - "toscana", - "tp", - "tr", - "trani-andria-barletta", - "trani-barletta-andria", - "traniandriabarletta", - "tranibarlettaandria", - "trapani", - "trentino", - "trentino-a-adige", - "trentino-aadige", - "trentino-alto-adige", - "trentino-altoadige", - "trentino-s-tirol", - "trentino-stirol", - "trentino-sud-tirol", - "trentino-sudtirol", - "trentino-sued-tirol", - "trentino-suedtirol", - "trentinoa-adige", - "trentinoaadige", - "trentinoalto-adige", - "trentinoaltoadige", - "trentinos-tirol", - "trentinostirol", - "trentinosud-tirol", - "trentinosudtirol", - "trentinosued-tirol", - "trentinosuedtirol", - "trento", - "treviso", - "trieste", - "ts", - "turin", - "tuscany", - "tv", - "ud", - "udine", - "umb", - "umbria", - "urbino-pesaro", - "urbinopesaro", - "va", - "val-d-aosta", - "val-daosta", - "vald-aosta", - "valdaosta", - "valle-aosta", - "valle-d-aosta", - "valle-daosta", - "valleaosta", - "valled-aosta", - "valledaosta", - "vallee-aoste", - "valleeaoste", - "vao", - "varese", - "vb", - "vc", - "vda", - "ve", - "ven", - "veneto", - "venezia", - "venice", - "verbania", - "vercelli", - "verona", - "vi", - "vibo-valentia", - "vibovalentia", - "vicenza", - "viterbo", - "vr", - "vs", - "vt", - "vv", - "co", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "name", - "net", - "org", - "sch", - "ac", - "ad", - "aichi", - "akita", - "aomori", - "blogspot", - "chiba", - "co", - "ed", - "ehime", - "fukui", - "fukuoka", - "fukushima", - "gifu", - "go", - "gr", - "gunma", - "hiroshima", - "hokkaido", - "hyogo", - "ibaraki", - "ishikawa", - "iwate", - "kagawa", - "kagoshima", - "kanagawa", - "kawasaki", - "kitakyushu", - "kobe", - "kochi", - "kumamoto", - "kyoto", - "lg", - "mie", - "miyagi", - "miyazaki", - "nagano", - "nagasaki", - "nagoya", - "nara", - "ne", - "niigata", - "oita", - "okayama", - "okinawa", - "or", - "osaka", - "saga", - "saitama", - "sapporo", - "sendai", - "shiga", - "shimane", - "shizuoka", - "tochigi", - "tokushima", - "tokyo", - "tottori", - "toyama", - "wakayama", - "xn--0trq7p7nn", - "xn--1ctwo", - "xn--1lqs03n", - "xn--1lqs71d", - "xn--2m4a15e", - "xn--32vp30h", - "xn--4it168d", - "xn--4it797k", - "xn--4pvxs", - "xn--5js045d", - "xn--5rtp49c", - "xn--5rtq34k", - "xn--6btw5a", - "xn--6orx2r", - "xn--7t0a264c", - "xn--8ltr62k", - "xn--8pvr4u", - "xn--c3s14m", - "xn--d5qv7z876c", - "xn--djrs72d6uy", - "xn--djty4k", - "xn--efvn9s", - "xn--ehqz56n", - "xn--elqq16h", - "xn--f6qx53a", - "xn--k7yn95e", - "xn--kbrq7o", - "xn--klt787d", - "xn--kltp7d", - "xn--kltx9a", - "xn--klty5x", - "xn--mkru45i", - "xn--nit225k", - "xn--ntso0iqx3a", - "xn--ntsq17g", - "xn--pssu33l", - "xn--qqqt11m", - "xn--rht27z", - "xn--rht3d", - "xn--rht61e", - "xn--rny31h", - "xn--tor131o", - "xn--uist22h", - "xn--uisz3g", - "xn--uuwu58a", - "xn--vgu402c", - "xn--zbx025d", - "yamagata", - "yamaguchi", - "yamanashi", - "yokohama", - "aisai", - "ama", - "anjo", - "asuke", - "chiryu", - "chita", - "fuso", - "gamagori", - "handa", - "hazu", - "hekinan", - "higashiura", - "ichinomiya", - "inazawa", - "inuyama", - "isshiki", - "iwakura", - "kanie", - "kariya", - "kasugai", - "kira", - "kiyosu", - "komaki", - "konan", - "kota", - "mihama", - "miyoshi", - "nishio", - "nisshin", - "obu", - "oguchi", - "oharu", - "okazaki", - "owariasahi", - "seto", - "shikatsu", - "shinshiro", - "shitara", - "tahara", - "takahama", - "tobishima", - 
"toei", - "togo", - "tokai", - "tokoname", - "toyoake", - "toyohashi", - "toyokawa", - "toyone", - "toyota", - "tsushima", - "yatomi", - "akita", - "daisen", - "fujisato", - "gojome", - "hachirogata", - "happou", - "higashinaruse", - "honjo", - "honjyo", - "ikawa", - "kamikoani", - "kamioka", - "katagami", - "kazuno", - "kitaakita", - "kosaka", - "kyowa", - "misato", - "mitane", - "moriyoshi", - "nikaho", - "noshiro", - "odate", - "oga", - "ogata", - "semboku", - "yokote", - "yurihonjo", - "aomori", - "gonohe", - "hachinohe", - "hashikami", - "hiranai", - "hirosaki", - "itayanagi", - "kuroishi", - "misawa", - "mutsu", - "nakadomari", - "noheji", - "oirase", - "owani", - "rokunohe", - "sannohe", - "shichinohe", - "shingo", - "takko", - "towada", - "tsugaru", - "tsuruta", - "abiko", - "asahi", - "chonan", - "chosei", - "choshi", - "chuo", - "funabashi", - "futtsu", - "hanamigawa", - "ichihara", - "ichikawa", - "ichinomiya", - "inzai", - "isumi", - "kamagaya", - "kamogawa", - "kashiwa", - "katori", - "katsuura", - "kimitsu", - "kisarazu", - "kozaki", - "kujukuri", - "kyonan", - "matsudo", - "midori", - "mihama", - "minamiboso", - "mobara", - "mutsuzawa", - "nagara", - "nagareyama", - "narashino", - "narita", - "noda", - "oamishirasato", - "omigawa", - "onjuku", - "otaki", - "sakae", - "sakura", - "shimofusa", - "shirako", - "shiroi", - "shisui", - "sodegaura", - "sosa", - "tako", - "tateyama", - "togane", - "tohnosho", - "tomisato", - "urayasu", - "yachimata", - "yachiyo", - "yokaichiba", - "yokoshibahikari", - "yotsukaido", - "ainan", - "honai", - "ikata", - "imabari", - "iyo", - "kamijima", - "kihoku", - "kumakogen", - "masaki", - "matsuno", - "matsuyama", - "namikata", - "niihama", - "ozu", - "saijo", - "seiyo", - "shikokuchuo", - "tobe", - "toon", - "uchiko", - "uwajima", - "yawatahama", - "echizen", - "eiheiji", - "fukui", - "ikeda", - "katsuyama", - "mihama", - "minamiechizen", - "obama", - "ohi", - "ono", - "sabae", - "sakai", - "takahama", - "tsuruga", - "wakasa", - "ashiya", - "buzen", - "chikugo", - "chikuho", - "chikujo", - "chikushino", - "chikuzen", - "chuo", - "dazaifu", - "fukuchi", - "hakata", - "higashi", - "hirokawa", - "hisayama", - "iizuka", - "inatsuki", - "kaho", - "kasuga", - "kasuya", - "kawara", - "keisen", - "koga", - "kurate", - "kurogi", - "kurume", - "minami", - "miyako", - "miyama", - "miyawaka", - "mizumaki", - "munakata", - "nakagawa", - "nakama", - "nishi", - "nogata", - "ogori", - "okagaki", - "okawa", - "oki", - "omuta", - "onga", - "onojo", - "oto", - "saigawa", - "sasaguri", - "shingu", - "shinyoshitomi", - "shonai", - "soeda", - "sue", - "tachiarai", - "tagawa", - "takata", - "toho", - "toyotsu", - "tsuiki", - "ukiha", - "umi", - "usui", - "yamada", - "yame", - "yanagawa", - "yukuhashi", - "aizubange", - "aizumisato", - "aizuwakamatsu", - "asakawa", - "bandai", - "date", - "fukushima", - "furudono", - "futaba", - "hanawa", - "higashi", - "hirata", - "hirono", - "iitate", - "inawashiro", - "ishikawa", - "iwaki", - "izumizaki", - "kagamiishi", - "kaneyama", - "kawamata", - "kitakata", - "kitashiobara", - "koori", - "koriyama", - "kunimi", - "miharu", - "mishima", - "namie", - "nango", - "nishiaizu", - "nishigo", - "okuma", - "omotego", - "ono", - "otama", - "samegawa", - "shimogo", - "shirakawa", - "showa", - "soma", - "sukagawa", - "taishin", - "tamakawa", - "tanagura", - "tenei", - "yabuki", - "yamato", - "yamatsuri", - "yanaizu", - "yugawa", - "anpachi", - "ena", - "gifu", - "ginan", - "godo", - "gujo", - "hashima", - "hichiso", - "hida", - 
"higashishirakawa", - "ibigawa", - "ikeda", - "kakamigahara", - "kani", - "kasahara", - "kasamatsu", - "kawaue", - "kitagata", - "mino", - "minokamo", - "mitake", - "mizunami", - "motosu", - "nakatsugawa", - "ogaki", - "sakahogi", - "seki", - "sekigahara", - "shirakawa", - "tajimi", - "takayama", - "tarui", - "toki", - "tomika", - "wanouchi", - "yamagata", - "yaotsu", - "yoro", - "annaka", - "chiyoda", - "fujioka", - "higashiagatsuma", - "isesaki", - "itakura", - "kanna", - "kanra", - "katashina", - "kawaba", - "kiryu", - "kusatsu", - "maebashi", - "meiwa", - "midori", - "minakami", - "naganohara", - "nakanojo", - "nanmoku", - "numata", - "oizumi", - "ora", - "ota", - "shibukawa", - "shimonita", - "shinto", - "showa", - "takasaki", - "takayama", - "tamamura", - "tatebayashi", - "tomioka", - "tsukiyono", - "tsumagoi", - "ueno", - "yoshioka", - "asaminami", - "daiwa", - "etajima", - "fuchu", - "fukuyama", - "hatsukaichi", - "higashihiroshima", - "hongo", - "jinsekikogen", - "kaita", - "kui", - "kumano", - "kure", - "mihara", - "miyoshi", - "naka", - "onomichi", - "osakikamijima", - "otake", - "saka", - "sera", - "seranishi", - "shinichi", - "shobara", - "takehara", - "abashiri", - "abira", - "aibetsu", - "akabira", - "akkeshi", - "asahikawa", - "ashibetsu", - "ashoro", - "assabu", - "atsuma", - "bibai", - "biei", - "bifuka", - "bihoro", - "biratori", - "chippubetsu", - "chitose", - "date", - "ebetsu", - "embetsu", - "eniwa", - "erimo", - "esan", - "esashi", - "fukagawa", - "fukushima", - "furano", - "furubira", - "haboro", - "hakodate", - "hamatonbetsu", - "hidaka", - "higashikagura", - "higashikawa", - "hiroo", - "hokuryu", - "hokuto", - "honbetsu", - "horokanai", - "horonobe", - "ikeda", - "imakane", - "ishikari", - "iwamizawa", - "iwanai", - "kamifurano", - "kamikawa", - "kamishihoro", - "kamisunagawa", - "kamoenai", - "kayabe", - "kembuchi", - "kikonai", - "kimobetsu", - "kitahiroshima", - "kitami", - "kiyosato", - "koshimizu", - "kunneppu", - "kuriyama", - "kuromatsunai", - "kushiro", - "kutchan", - "kyowa", - "mashike", - "matsumae", - "mikasa", - "minamifurano", - "mombetsu", - "moseushi", - "mukawa", - "muroran", - "naie", - "nakagawa", - "nakasatsunai", - "nakatombetsu", - "nanae", - "nanporo", - "nayoro", - "nemuro", - "niikappu", - "niki", - "nishiokoppe", - "noboribetsu", - "numata", - "obihiro", - "obira", - "oketo", - "okoppe", - "otaru", - "otobe", - "otofuke", - "otoineppu", - "oumu", - "ozora", - "pippu", - "rankoshi", - "rebun", - "rikubetsu", - "rishiri", - "rishirifuji", - "saroma", - "sarufutsu", - "shakotan", - "shari", - "shibecha", - "shibetsu", - "shikabe", - "shikaoi", - "shimamaki", - "shimizu", - "shimokawa", - "shinshinotsu", - "shintoku", - "shiranuka", - "shiraoi", - "shiriuchi", - "sobetsu", - "sunagawa", - "taiki", - "takasu", - "takikawa", - "takinoue", - "teshikaga", - "tobetsu", - "tohma", - "tomakomai", - "tomari", - "toya", - "toyako", - "toyotomi", - "toyoura", - "tsubetsu", - "tsukigata", - "urakawa", - "urausu", - "uryu", - "utashinai", - "wakkanai", - "wassamu", - "yakumo", - "yoichi", - "aioi", - "akashi", - "ako", - "amagasaki", - "aogaki", - "asago", - "ashiya", - "awaji", - "fukusaki", - "goshiki", - "harima", - "himeji", - "ichikawa", - "inagawa", - "itami", - "kakogawa", - "kamigori", - "kamikawa", - "kasai", - "kasuga", - "kawanishi", - "miki", - "minamiawaji", - "nishinomiya", - "nishiwaki", - "ono", - "sanda", - "sannan", - "sasayama", - "sayo", - "shingu", - "shinonsen", - "shiso", - "sumoto", - "taishi", - "taka", - "takarazuka", - 
"takasago", - "takino", - "tamba", - "tatsuno", - "toyooka", - "yabu", - "yashiro", - "yoka", - "yokawa", - "ami", - "asahi", - "bando", - "chikusei", - "daigo", - "fujishiro", - "hitachi", - "hitachinaka", - "hitachiomiya", - "hitachiota", - "ibaraki", - "ina", - "inashiki", - "itako", - "iwama", - "joso", - "kamisu", - "kasama", - "kashima", - "kasumigaura", - "koga", - "miho", - "mito", - "moriya", - "naka", - "namegata", - "oarai", - "ogawa", - "omitama", - "ryugasaki", - "sakai", - "sakuragawa", - "shimodate", - "shimotsuma", - "shirosato", - "sowa", - "suifu", - "takahagi", - "tamatsukuri", - "tokai", - "tomobe", - "tone", - "toride", - "tsuchiura", - "tsukuba", - "uchihara", - "ushiku", - "yachiyo", - "yamagata", - "yawara", - "yuki", - "anamizu", - "hakui", - "hakusan", - "kaga", - "kahoku", - "kanazawa", - "kawakita", - "komatsu", - "nakanoto", - "nanao", - "nomi", - "nonoichi", - "noto", - "shika", - "suzu", - "tsubata", - "tsurugi", - "uchinada", - "wajima", - "fudai", - "fujisawa", - "hanamaki", - "hiraizumi", - "hirono", - "ichinohe", - "ichinoseki", - "iwaizumi", - "iwate", - "joboji", - "kamaishi", - "kanegasaki", - "karumai", - "kawai", - "kitakami", - "kuji", - "kunohe", - "kuzumaki", - "miyako", - "mizusawa", - "morioka", - "ninohe", - "noda", - "ofunato", - "oshu", - "otsuchi", - "rikuzentakata", - "shiwa", - "shizukuishi", - "sumita", - "tanohata", - "tono", - "yahaba", - "yamada", - "ayagawa", - "higashikagawa", - "kanonji", - "kotohira", - "manno", - "marugame", - "mitoyo", - "naoshima", - "sanuki", - "tadotsu", - "takamatsu", - "tonosho", - "uchinomi", - "utazu", - "zentsuji", - "akune", - "amami", - "hioki", - "isa", - "isen", - "izumi", - "kagoshima", - "kanoya", - "kawanabe", - "kinko", - "kouyama", - "makurazaki", - "matsumoto", - "minamitane", - "nakatane", - "nishinoomote", - "satsumasendai", - "soo", - "tarumizu", - "yusui", - "aikawa", - "atsugi", - "ayase", - "chigasaki", - "ebina", - "fujisawa", - "hadano", - "hakone", - "hiratsuka", - "isehara", - "kaisei", - "kamakura", - "kiyokawa", - "matsuda", - "minamiashigara", - "miura", - "nakai", - "ninomiya", - "odawara", - "oi", - "oiso", - "sagamihara", - "samukawa", - "tsukui", - "yamakita", - "yamato", - "yokosuka", - "yugawara", - "zama", - "zushi", - "city", - "city", - "city", - "aki", - "geisei", - "hidaka", - "higashitsuno", - "ino", - "kagami", - "kami", - "kitagawa", - "kochi", - "mihara", - "motoyama", - "muroto", - "nahari", - "nakamura", - "nankoku", - "nishitosa", - "niyodogawa", - "ochi", - "okawa", - "otoyo", - "otsuki", - "sakawa", - "sukumo", - "susaki", - "tosa", - "tosashimizu", - "toyo", - "tsuno", - "umaji", - "yasuda", - "yusuhara", - "amakusa", - "arao", - "aso", - "choyo", - "gyokuto", - "kamiamakusa", - "kikuchi", - "kumamoto", - "mashiki", - "mifune", - "minamata", - "minamioguni", - "nagasu", - "nishihara", - "oguni", - "ozu", - "sumoto", - "takamori", - "uki", - "uto", - "yamaga", - "yamato", - "yatsushiro", - "ayabe", - "fukuchiyama", - "higashiyama", - "ide", - "ine", - "joyo", - "kameoka", - "kamo", - "kita", - "kizu", - "kumiyama", - "kyotamba", - "kyotanabe", - "kyotango", - "maizuru", - "minami", - "minamiyamashiro", - "miyazu", - "muko", - "nagaokakyo", - "nakagyo", - "nantan", - "oyamazaki", - "sakyo", - "seika", - "tanabe", - "uji", - "ujitawara", - "wazuka", - "yamashina", - "yawata", - "asahi", - "inabe", - "ise", - "kameyama", - "kawagoe", - "kiho", - "kisosaki", - "kiwa", - "komono", - "kumano", - "kuwana", - "matsusaka", - "meiwa", - "mihama", - "minamiise", - "misugi", 
- "miyama", - "nabari", - "shima", - "suzuka", - "tado", - "taiki", - "taki", - "tamaki", - "toba", - "tsu", - "udono", - "ureshino", - "watarai", - "yokkaichi", - "furukawa", - "higashimatsushima", - "ishinomaki", - "iwanuma", - "kakuda", - "kami", - "kawasaki", - "marumori", - "matsushima", - "minamisanriku", - "misato", - "murata", - "natori", - "ogawara", - "ohira", - "onagawa", - "osaki", - "rifu", - "semine", - "shibata", - "shichikashuku", - "shikama", - "shiogama", - "shiroishi", - "tagajo", - "taiwa", - "tome", - "tomiya", - "wakuya", - "watari", - "yamamoto", - "zao", - "aya", - "ebino", - "gokase", - "hyuga", - "kadogawa", - "kawaminami", - "kijo", - "kitagawa", - "kitakata", - "kitaura", - "kobayashi", - "kunitomi", - "kushima", - "mimata", - "miyakonojo", - "miyazaki", - "morotsuka", - "nichinan", - "nishimera", - "nobeoka", - "saito", - "shiiba", - "shintomi", - "takaharu", - "takanabe", - "takazaki", - "tsuno", - "achi", - "agematsu", - "anan", - "aoki", - "asahi", - "azumino", - "chikuhoku", - "chikuma", - "chino", - "fujimi", - "hakuba", - "hara", - "hiraya", - "iida", - "iijima", - "iiyama", - "iizuna", - "ikeda", - "ikusaka", - "ina", - "karuizawa", - "kawakami", - "kiso", - "kisofukushima", - "kitaaiki", - "komagane", - "komoro", - "matsukawa", - "matsumoto", - "miasa", - "minamiaiki", - "minamimaki", - "minamiminowa", - "minowa", - "miyada", - "miyota", - "mochizuki", - "nagano", - "nagawa", - "nagiso", - "nakagawa", - "nakano", - "nozawaonsen", - "obuse", - "ogawa", - "okaya", - "omachi", - "omi", - "ookuwa", - "ooshika", - "otaki", - "otari", - "sakae", - "sakaki", - "saku", - "sakuho", - "shimosuwa", - "shinanomachi", - "shiojiri", - "suwa", - "suzaka", - "takagi", - "takamori", - "takayama", - "tateshina", - "tatsuno", - "togakushi", - "togura", - "tomi", - "ueda", - "wada", - "yamagata", - "yamanouchi", - "yasaka", - "yasuoka", - "chijiwa", - "futsu", - "goto", - "hasami", - "hirado", - "iki", - "isahaya", - "kawatana", - "kuchinotsu", - "matsuura", - "nagasaki", - "obama", - "omura", - "oseto", - "saikai", - "sasebo", - "seihi", - "shimabara", - "shinkamigoto", - "togitsu", - "tsushima", - "unzen", - "city", - "ando", - "gose", - "heguri", - "higashiyoshino", - "ikaruga", - "ikoma", - "kamikitayama", - "kanmaki", - "kashiba", - "kashihara", - "katsuragi", - "kawai", - "kawakami", - "kawanishi", - "koryo", - "kurotaki", - "mitsue", - "miyake", - "nara", - "nosegawa", - "oji", - "ouda", - "oyodo", - "sakurai", - "sango", - "shimoichi", - "shimokitayama", - "shinjo", - "soni", - "takatori", - "tawaramoto", - "tenkawa", - "tenri", - "uda", - "yamatokoriyama", - "yamatotakada", - "yamazoe", - "yoshino", - "aga", - "agano", - "gosen", - "itoigawa", - "izumozaki", - "joetsu", - "kamo", - "kariwa", - "kashiwazaki", - "minamiuonuma", - "mitsuke", - "muika", - "murakami", - "myoko", - "nagaoka", - "niigata", - "ojiya", - "omi", - "sado", - "sanjo", - "seiro", - "seirou", - "sekikawa", - "shibata", - "tagami", - "tainai", - "tochio", - "tokamachi", - "tsubame", - "tsunan", - "uonuma", - "yahiko", - "yoita", - "yuzawa", - "beppu", - "bungoono", - "bungotakada", - "hasama", - "hiji", - "himeshima", - "hita", - "kamitsue", - "kokonoe", - "kuju", - "kunisaki", - "kusu", - "oita", - "saiki", - "taketa", - "tsukumi", - "usa", - "usuki", - "yufu", - "akaiwa", - "asakuchi", - "bizen", - "hayashima", - "ibara", - "kagamino", - "kasaoka", - "kibichuo", - "kumenan", - "kurashiki", - "maniwa", - "misaki", - "nagi", - "niimi", - "nishiawakura", - "okayama", - "satosho", - "setouchi", - 
"shinjo", - "shoo", - "soja", - "takahashi", - "tamano", - "tsuyama", - "wake", - "yakage", - "aguni", - "ginowan", - "ginoza", - "gushikami", - "haebaru", - "higashi", - "hirara", - "iheya", - "ishigaki", - "ishikawa", - "itoman", - "izena", - "kadena", - "kin", - "kitadaito", - "kitanakagusuku", - "kumejima", - "kunigami", - "minamidaito", - "motobu", - "nago", - "naha", - "nakagusuku", - "nakijin", - "nanjo", - "nishihara", - "ogimi", - "okinawa", - "onna", - "shimoji", - "taketomi", - "tarama", - "tokashiki", - "tomigusuku", - "tonaki", - "urasoe", - "uruma", - "yaese", - "yomitan", - "yonabaru", - "yonaguni", - "zamami", - "abeno", - "chihayaakasaka", - "chuo", - "daito", - "fujiidera", - "habikino", - "hannan", - "higashiosaka", - "higashisumiyoshi", - "higashiyodogawa", - "hirakata", - "ibaraki", - "ikeda", - "izumi", - "izumiotsu", - "izumisano", - "kadoma", - "kaizuka", - "kanan", - "kashiwara", - "katano", - "kawachinagano", - "kishiwada", - "kita", - "kumatori", - "matsubara", - "minato", - "minoh", - "misaki", - "moriguchi", - "neyagawa", - "nishi", - "nose", - "osakasayama", - "sakai", - "sayama", - "sennan", - "settsu", - "shijonawate", - "shimamoto", - "suita", - "tadaoka", - "taishi", - "tajiri", - "takaishi", - "takatsuki", - "tondabayashi", - "toyonaka", - "toyono", - "yao", - "ariake", - "arita", - "fukudomi", - "genkai", - "hamatama", - "hizen", - "imari", - "kamimine", - "kanzaki", - "karatsu", - "kashima", - "kitagata", - "kitahata", - "kiyama", - "kouhoku", - "kyuragi", - "nishiarita", - "ogi", - "omachi", - "ouchi", - "saga", - "shiroishi", - "taku", - "tara", - "tosu", - "yoshinogari", - "arakawa", - "asaka", - "chichibu", - "fujimi", - "fujimino", - "fukaya", - "hanno", - "hanyu", - "hasuda", - "hatogaya", - "hatoyama", - "hidaka", - "higashichichibu", - "higashimatsuyama", - "honjo", - "ina", - "iruma", - "iwatsuki", - "kamiizumi", - "kamikawa", - "kamisato", - "kasukabe", - "kawagoe", - "kawaguchi", - "kawajima", - "kazo", - "kitamoto", - "koshigaya", - "kounosu", - "kuki", - "kumagaya", - "matsubushi", - "minano", - "misato", - "miyashiro", - "miyoshi", - "moroyama", - "nagatoro", - "namegawa", - "niiza", - "ogano", - "ogawa", - "ogose", - "okegawa", - "omiya", - "otaki", - "ranzan", - "ryokami", - "saitama", - "sakado", - "satte", - "sayama", - "shiki", - "shiraoka", - "soka", - "sugito", - "toda", - "tokigawa", - "tokorozawa", - "tsurugashima", - "urawa", - "warabi", - "yashio", - "yokoze", - "yono", - "yorii", - "yoshida", - "yoshikawa", - "yoshimi", - "city", - "city", - "aisho", - "gamo", - "higashiomi", - "hikone", - "koka", - "konan", - "kosei", - "koto", - "kusatsu", - "maibara", - "moriyama", - "nagahama", - "nishiazai", - "notogawa", - "omihachiman", - "otsu", - "ritto", - "ryuoh", - "takashima", - "takatsuki", - "torahime", - "toyosato", - "yasu", - "akagi", - "ama", - "gotsu", - "hamada", - "higashiizumo", - "hikawa", - "hikimi", - "izumo", - "kakinoki", - "masuda", - "matsue", - "misato", - "nishinoshima", - "ohda", - "okinoshima", - "okuizumo", - "shimane", - "tamayu", - "tsuwano", - "unnan", - "yakumo", - "yasugi", - "yatsuka", - "arai", - "atami", - "fuji", - "fujieda", - "fujikawa", - "fujinomiya", - "fukuroi", - "gotemba", - "haibara", - "hamamatsu", - "higashiizu", - "ito", - "iwata", - "izu", - "izunokuni", - "kakegawa", - "kannami", - "kawanehon", - "kawazu", - "kikugawa", - "kosai", - "makinohara", - "matsuzaki", - "minamiizu", - "mishima", - "morimachi", - "nishiizu", - "numazu", - "omaezaki", - "shimada", - "shimizu", - "shimoda", - 
"shizuoka", - "susono", - "yaizu", - "yoshida", - "ashikaga", - "bato", - "haga", - "ichikai", - "iwafune", - "kaminokawa", - "kanuma", - "karasuyama", - "kuroiso", - "mashiko", - "mibu", - "moka", - "motegi", - "nasu", - "nasushiobara", - "nikko", - "nishikata", - "nogi", - "ohira", - "ohtawara", - "oyama", - "sakura", - "sano", - "shimotsuke", - "shioya", - "takanezawa", - "tochigi", - "tsuga", - "ujiie", - "utsunomiya", - "yaita", - "aizumi", - "anan", - "ichiba", - "itano", - "kainan", - "komatsushima", - "matsushige", - "mima", - "minami", - "miyoshi", - "mugi", - "nakagawa", - "naruto", - "sanagochi", - "shishikui", - "tokushima", - "wajiki", - "adachi", - "akiruno", - "akishima", - "aogashima", - "arakawa", - "bunkyo", - "chiyoda", - "chofu", - "chuo", - "edogawa", - "fuchu", - "fussa", - "hachijo", - "hachioji", - "hamura", - "higashikurume", - "higashimurayama", - "higashiyamato", - "hino", - "hinode", - "hinohara", - "inagi", - "itabashi", - "katsushika", - "kita", - "kiyose", - "kodaira", - "koganei", - "kokubunji", - "komae", - "koto", - "kouzushima", - "kunitachi", - "machida", - "meguro", - "minato", - "mitaka", - "mizuho", - "musashimurayama", - "musashino", - "nakano", - "nerima", - "ogasawara", - "okutama", - "ome", - "oshima", - "ota", - "setagaya", - "shibuya", - "shinagawa", - "shinjuku", - "suginami", - "sumida", - "tachikawa", - "taito", - "tama", - "toshima", - "chizu", - "hino", - "kawahara", - "koge", - "kotoura", - "misasa", - "nanbu", - "nichinan", - "sakaiminato", - "tottori", - "wakasa", - "yazu", - "yonago", - "asahi", - "fuchu", - "fukumitsu", - "funahashi", - "himi", - "imizu", - "inami", - "johana", - "kamiichi", - "kurobe", - "nakaniikawa", - "namerikawa", - "nanto", - "nyuzen", - "oyabe", - "taira", - "takaoka", - "tateyama", - "toga", - "tonami", - "toyama", - "unazuki", - "uozu", - "yamada", - "arida", - "aridagawa", - "gobo", - "hashimoto", - "hidaka", - "hirogawa", - "inami", - "iwade", - "kainan", - "kamitonda", - "katsuragi", - "kimino", - "kinokawa", - "kitayama", - "koya", - "koza", - "kozagawa", - "kudoyama", - "kushimoto", - "mihama", - "misato", - "nachikatsuura", - "shingu", - "shirahama", - "taiji", - "tanabe", - "wakayama", - "yuasa", - "yura", - "asahi", - "funagata", - "higashine", - "iide", - "kahoku", - "kaminoyama", - "kaneyama", - "kawanishi", - "mamurogawa", - "mikawa", - "murayama", - "nagai", - "nakayama", - "nanyo", - "nishikawa", - "obanazawa", - "oe", - "oguni", - "ohkura", - "oishida", - "sagae", - "sakata", - "sakegawa", - "shinjo", - "shirataka", - "shonai", - "takahata", - "tendo", - "tozawa", - "tsuruoka", - "yamagata", - "yamanobe", - "yonezawa", - "yuza", - "abu", - "hagi", - "hikari", - "hofu", - "iwakuni", - "kudamatsu", - "mitou", - "nagato", - "oshima", - "shimonoseki", - "shunan", - "tabuse", - "tokuyama", - "toyota", - "ube", - "yuu", - "chuo", - "doshi", - "fuefuki", - "fujikawa", - "fujikawaguchiko", - "fujiyoshida", - "hayakawa", - "hokuto", - "ichikawamisato", - "kai", - "kofu", - "koshu", - "kosuge", - "minami-alps", - "minobu", - "nakamichi", - "nanbu", - "narusawa", - "nirasaki", - "nishikatsura", - "oshino", - "otsuki", - "showa", - "tabayama", - "tsuru", - "uenohara", - "yamanakako", - "yamanashi", - "city", - "co", - "blogspot", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "biz", - "com", - "edu", - "gov", - "info", - "net", - "org", - "ass", - "asso", - "com", - "coop", - "edu", - "gouv", - "gov", - "medecin", - "mil", - "nom", - "notaires", - "org", - "pharmaciens", - "prd", - "presse", - "tm", 
- "veterinaire", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gov", - "org", - "rep", - "tra", - "ac", - "blogspot", - "busan", - "chungbuk", - "chungnam", - "co", - "daegu", - "daejeon", - "es", - "gangwon", - "go", - "gwangju", - "gyeongbuk", - "gyeonggi", - "gyeongnam", - "hs", - "incheon", - "jeju", - "jeonbuk", - "jeonnam", - "kg", - "mil", - "ms", - "ne", - "or", - "pe", - "re", - "sc", - "seoul", - "ulsan", - "co", - "edu", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "net", - "nym", - "org", - "bnr", - "c", - "com", - "edu", - "gov", - "info", - "int", - "net", - "nym", - "org", - "per", - "static", - "dev", - "sites", - "com", - "edu", - "gov", - "net", - "org", - "co", - "com", - "edu", - "gov", - "net", - "org", - "oy", - "blogspot", - "nom", - "nym", - "cyon", - "mypep", - "ac", - "assn", - "com", - "edu", - "gov", - "grp", - "hotel", - "int", - "ltd", - "net", - "ngo", - "org", - "sch", - "soc", - "web", - "com", - "edu", - "gov", - "net", - "org", - "co", - "org", - "blogspot", - "gov", - "nym", - "blogspot", - "nym", - "asn", - "com", - "conf", - "edu", - "gov", - "id", - "mil", - "net", - "org", - "com", - "edu", - "gov", - "id", - "med", - "net", - "org", - "plc", - "sch", - "ac", - "co", - "gov", - "net", - "org", - "press", - "router", - "asso", - "tm", - "blogspot", - "ac", - "brasilia", - "c66", - "co", - "daplie", - "ddns", - "diskstation", - "dnsfor", - "dscloud", - "edu", - "filegear", - "gov", - "hopto", - "i234", - "its", - "loginto", - "myds", - "net", - "noip", - "nym", - "org", - "priv", - "synology", - "webhop", - "wedeploy", - "yombo", - "localhost", - "co", - "com", - "edu", - "gov", - "mil", - "nom", - "org", - "prd", - "tm", - "blogspot", - "com", - "edu", - "gov", - "inf", - "name", - "net", - "nom", - "org", - "com", - "edu", - "gouv", - "gov", - "net", - "org", - "presse", - "edu", - "gov", - "nyc", - "org", - "com", - "edu", - "gov", - "net", - "org", - "dscloud", - "blogspot", - "gov", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "net", - "org", - "blogspot", - "ac", - "co", - "com", - "gov", - "net", - "or", - "org", - "academy", - "agriculture", - "air", - "airguard", - "alabama", - "alaska", - "amber", - "ambulance", - "american", - "americana", - "americanantiques", - "americanart", - "amsterdam", - "and", - "annefrank", - "anthro", - "anthropology", - "antiques", - "aquarium", - "arboretum", - "archaeological", - "archaeology", - "architecture", - "art", - "artanddesign", - "artcenter", - "artdeco", - "arteducation", - "artgallery", - "arts", - "artsandcrafts", - "asmatart", - "assassination", - "assisi", - "association", - "astronomy", - "atlanta", - "austin", - "australia", - "automotive", - "aviation", - "axis", - "badajoz", - "baghdad", - "bahn", - "bale", - "baltimore", - "barcelona", - "baseball", - "basel", - "baths", - "bauern", - "beauxarts", - "beeldengeluid", - "bellevue", - "bergbau", - "berkeley", - "berlin", - "bern", - "bible", - "bilbao", - "bill", - "birdart", - "birthplace", - "bonn", - "boston", - "botanical", - "botanicalgarden", - "botanicgarden", - "botany", - "brandywinevalley", - "brasil", - "bristol", - "british", - "britishcolumbia", - "broadcast", - "brunel", - "brussel", - "brussels", - "bruxelles", - "building", - "burghof", - "bus", - "bushey", - "cadaques", - "california", - "cambridge", - "can", - "canada", - "capebreton", - "carrier", - "cartoonart", - "casadelamoneda", - "castle", - "castres", - "celtic", - "center", - "chattanooga", - 
"cheltenham", - "chesapeakebay", - "chicago", - "children", - "childrens", - "childrensgarden", - "chiropractic", - "chocolate", - "christiansburg", - "cincinnati", - "cinema", - "circus", - "civilisation", - "civilization", - "civilwar", - "clinton", - "clock", - "coal", - "coastaldefence", - "cody", - "coldwar", - "collection", - "colonialwilliamsburg", - "coloradoplateau", - "columbia", - "columbus", - "communication", - "communications", - "community", - "computer", - "computerhistory", - "contemporary", - "contemporaryart", - "convent", - "copenhagen", - "corporation", - "corvette", - "costume", - "countryestate", - "county", - "crafts", - "cranbrook", - "creation", - "cultural", - "culturalcenter", - "culture", - "cyber", - "cymru", - "dali", - "dallas", - "database", - "ddr", - "decorativearts", - "delaware", - "delmenhorst", - "denmark", - "depot", - "design", - "detroit", - "dinosaur", - "discovery", - "dolls", - "donostia", - "durham", - "eastafrica", - "eastcoast", - "education", - "educational", - "egyptian", - "eisenbahn", - "elburg", - "elvendrell", - "embroidery", - "encyclopedic", - "england", - "entomology", - "environment", - "environmentalconservation", - "epilepsy", - "essex", - "estate", - "ethnology", - "exeter", - "exhibition", - "family", - "farm", - "farmequipment", - "farmers", - "farmstead", - "field", - "figueres", - "filatelia", - "film", - "fineart", - "finearts", - "finland", - "flanders", - "florida", - "force", - "fortmissoula", - "fortworth", - "foundation", - "francaise", - "frankfurt", - "franziskaner", - "freemasonry", - "freiburg", - "fribourg", - "frog", - "fundacio", - "furniture", - "gallery", - "garden", - "gateway", - "geelvinck", - "gemological", - "geology", - "georgia", - "giessen", - "glas", - "glass", - "gorge", - "grandrapids", - "graz", - "guernsey", - "halloffame", - "hamburg", - "handson", - "harvestcelebration", - "hawaii", - "health", - "heimatunduhren", - "hellas", - "helsinki", - "hembygdsforbund", - "heritage", - "histoire", - "historical", - "historicalsociety", - "historichouses", - "historisch", - "historisches", - "history", - "historyofscience", - "horology", - "house", - "humanities", - "illustration", - "imageandsound", - "indian", - "indiana", - "indianapolis", - "indianmarket", - "intelligence", - "interactive", - "iraq", - "iron", - "isleofman", - "jamison", - "jefferson", - "jerusalem", - "jewelry", - "jewish", - "jewishart", - "jfk", - "journalism", - "judaica", - "judygarland", - "juedisches", - "juif", - "karate", - "karikatur", - "kids", - "koebenhavn", - "koeln", - "kunst", - "kunstsammlung", - "kunstunddesign", - "labor", - "labour", - "lajolla", - "lancashire", - "landes", - "lans", - "larsson", - "lewismiller", - "lincoln", - "linz", - "living", - "livinghistory", - "localhistory", - "london", - "losangeles", - "louvre", - "loyalist", - "lucerne", - "luxembourg", - "luzern", - "mad", - "madrid", - "mallorca", - "manchester", - "mansion", - "mansions", - "manx", - "marburg", - "maritime", - "maritimo", - "maryland", - "marylhurst", - "media", - "medical", - "medizinhistorisches", - "meeres", - "memorial", - "mesaverde", - "michigan", - "midatlantic", - "military", - "mill", - "miners", - "mining", - "minnesota", - "missile", - "missoula", - "modern", - "moma", - "money", - "monmouth", - "monticello", - "montreal", - "moscow", - "motorcycle", - "muenchen", - "muenster", - "mulhouse", - "muncie", - "museet", - "museumcenter", - "museumvereniging", - "music", - "national", - "nationalfirearms", - "nationalheritage", - 
"nativeamerican", - "naturalhistory", - "naturalhistorymuseum", - "naturalsciences", - "nature", - "naturhistorisches", - "natuurwetenschappen", - "naumburg", - "naval", - "nebraska", - "neues", - "newhampshire", - "newjersey", - "newmexico", - "newport", - "newspaper", - "newyork", - "niepce", - "norfolk", - "north", - "nrw", - "nuernberg", - "nuremberg", - "nyc", - "nyny", - "oceanographic", - "oceanographique", - "omaha", - "online", - "ontario", - "openair", - "oregon", - "oregontrail", - "otago", - "oxford", - "pacific", - "paderborn", - "palace", - "paleo", - "palmsprings", - "panama", - "paris", - "pasadena", - "pharmacy", - "philadelphia", - "philadelphiaarea", - "philately", - "phoenix", - "photography", - "pilots", - "pittsburgh", - "planetarium", - "plantation", - "plants", - "plaza", - "portal", - "portland", - "portlligat", - "posts-and-telecommunications", - "preservation", - "presidio", - "press", - "project", - "public", - "pubol", - "quebec", - "railroad", - "railway", - "research", - "resistance", - "riodejaneiro", - "rochester", - "rockart", - "roma", - "russia", - "saintlouis", - "salem", - "salvadordali", - "salzburg", - "sandiego", - "sanfrancisco", - "santabarbara", - "santacruz", - "santafe", - "saskatchewan", - "satx", - "savannahga", - "schlesisches", - "schoenbrunn", - "schokoladen", - "school", - "schweiz", - "science", - "science-fiction", - "scienceandhistory", - "scienceandindustry", - "sciencecenter", - "sciencecenters", - "sciencehistory", - "sciences", - "sciencesnaturelles", - "scotland", - "seaport", - "settlement", - "settlers", - "shell", - "sherbrooke", - "sibenik", - "silk", - "ski", - "skole", - "society", - "sologne", - "soundandvision", - "southcarolina", - "southwest", - "space", - "spy", - "square", - "stadt", - "stalbans", - "starnberg", - "state", - "stateofdelaware", - "station", - "steam", - "steiermark", - "stjohn", - "stockholm", - "stpetersburg", - "stuttgart", - "suisse", - "surgeonshall", - "surrey", - "svizzera", - "sweden", - "sydney", - "tank", - "tcm", - "technology", - "telekommunikation", - "television", - "texas", - "textile", - "theater", - "time", - "timekeeping", - "topology", - "torino", - "touch", - "town", - "transport", - "tree", - "trolley", - "trust", - "trustee", - "uhren", - "ulm", - "undersea", - "university", - "usa", - "usantiques", - "usarts", - "uscountryestate", - "usculture", - "usdecorativearts", - "usgarden", - "ushistory", - "ushuaia", - "uslivinghistory", - "utah", - "uvic", - "valley", - "vantaa", - "versailles", - "viking", - "village", - "virginia", - "virtual", - "virtuel", - "vlaanderen", - "volkenkunde", - "wales", - "wallonie", - "war", - "washingtondc", - "watch-and-clock", - "watchandclock", - "western", - "westfalen", - "whaling", - "wildlife", - "williamsburg", - "windmill", - "workshop", - "xn--9dbhblg6di", - "xn--comunicaes-v6a2o", - "xn--correios-e-telecomunicaes-ghc29a", - "xn--h1aegh", - "xn--lns-qla", - "york", - "yorkshire", - "yosemite", - "youth", - "zoological", - "zoology", - "aero", - "biz", - "com", - "coop", - "edu", - "gov", - "info", - "int", - "mil", - "museum", - "name", - "net", - "org", - "pro", - "ac", - "biz", - "co", - "com", - "coop", - "edu", - "gov", - "int", - "museum", - "net", - "org", - "blogspot", - "com", - "edu", - "gob", - "net", - "nym", - "org", - "blogspot", - "com", - "edu", - "gov", - "mil", - "name", - "net", - "org", - "ac", - "adv", - "co", - "edu", - "gov", - "mil", - "net", - "org", - "ca", - "cc", - "co", - "com", - "dr", - "in", - "info", - "mobi", - 
"mx", - "name", - "or", - "org", - "pro", - "school", - "tv", - "us", - "ws", - "her", - "his", - "forgot", - "forgot", - "asso", - "nom", - "alwaysdata", - "at-band-camp", - "azure-mobile", - "azurewebsites", - "barsy", - "blogdns", - "boomla", - "bounceme", - "bplaced", - "broke-it", - "buyshouses", - "casacam", - "cdn77", - "cdn77-ssl", - "channelsdvr", - "cloudaccess", - "cloudapp", - "cloudfront", - "cloudfunctions", - "cryptonomic", - "ddns", - "debian", - "definima", - "dnsalias", - "dnsdojo", - "does-it", - "dontexist", - "dsmynas", - "dynalias", - "dynathome", - "dynu", - "dynv6", - "eating-organic", - "endofinternet", - "familyds", - "fastly", - "fastlylb", - "feste-ip", - "firewall-gateway", - "flynnhosting", - "from-az", - "from-co", - "from-la", - "from-ny", - "gb", - "gets-it", - "ham-radio-op", - "homeftp", - "homeip", - "homelinux", - "homeunix", - "hu", - "in", - "in-the-band", - "ipifony", - "is-a-chef", - "is-a-geek", - "isa-geek", - "jp", - "kicks-ass", - "knx-server", - "moonscale", - "mydissent", - "myeffect", - "myfritz", - "mymediapc", - "mypsx", - "mysecuritycamera", - "nhlfan", - "no-ip", - "office-on-the", - "pgafan", - "podzone", - "privatizehealthinsurance", - "rackmaze", - "redirectme", - "ru", - "scrapper-site", - "se", - "selfip", - "sells-it", - "servebbs", - "serveblog", - "serveftp", - "serveminecraft", - "square7", - "static-access", - "sytes", - "t3l3p0rt", - "thruhere", - "twmail", - "uk", - "webhop", - "za", - "r", - "freetls", - "map", - "prod", - "ssl", - "a", - "global", - "a", - "b", - "global", - "map", - "alces", - "arts", - "com", - "firm", - "info", - "net", - "other", - "per", - "rec", - "store", - "web", - "com", - "edu", - "gov", - "i", - "mil", - "mobi", - "name", - "net", - "org", - "sch", - "blogspot", - "ac", - "biz", - "co", - "com", - "edu", - "gob", - "in", - "info", - "int", - "mil", - "net", - "nom", - "org", - "web", - "blogspot", - "bv", - "cistron", - "co", - "demon", - "transurl", - "virtueeldomein", - "aa", - "aarborte", - "aejrie", - "afjord", - "agdenes", - "ah", - "akershus", - "aknoluokta", - "akrehamn", - "al", - "alaheadju", - "alesund", - "algard", - "alstahaug", - "alta", - "alvdal", - "amli", - "amot", - "andasuolo", - "andebu", - "andoy", - "ardal", - "aremark", - "arendal", - "arna", - "aseral", - "asker", - "askim", - "askoy", - "askvoll", - "asnes", - "audnedaln", - "aukra", - "aure", - "aurland", - "aurskog-holand", - "austevoll", - "austrheim", - "averoy", - "badaddja", - "bahcavuotna", - "bahccavuotna", - "baidar", - "bajddar", - "balat", - "balestrand", - "ballangen", - "balsfjord", - "bamble", - "bardu", - "barum", - "batsfjord", - "bearalvahki", - "beardu", - "beiarn", - "berg", - "bergen", - "berlevag", - "bievat", - "bindal", - "birkenes", - "bjarkoy", - "bjerkreim", - "bjugn", - "blogspot", - "bodo", - "bokn", - "bomlo", - "bremanger", - "bronnoy", - "bronnoysund", - "brumunddal", - "bryne", - "bu", - "budejju", - "buskerud", - "bygland", - "bykle", - "cahcesuolo", - "co", - "davvenjarga", - "davvesiida", - "deatnu", - "dep", - "dielddanuorri", - "divtasvuodna", - "divttasvuotna", - "donna", - "dovre", - "drammen", - "drangedal", - "drobak", - "dyroy", - "egersund", - "eid", - "eidfjord", - "eidsberg", - "eidskog", - "eidsvoll", - "eigersund", - "elverum", - "enebakk", - "engerdal", - "etne", - "etnedal", - "evenassi", - "evenes", - "evje-og-hornnes", - "farsund", - "fauske", - "fedje", - "fet", - "fetsund", - "fhs", - "finnoy", - "fitjar", - "fjaler", - "fjell", - "fla", - "flakstad", - "flatanger", - 
"flekkefjord", - "flesberg", - "flora", - "floro", - "fm", - "folkebibl", - "folldal", - "forde", - "forsand", - "fosnes", - "frana", - "fredrikstad", - "frei", - "frogn", - "froland", - "frosta", - "froya", - "fuoisku", - "fuossko", - "fusa", - "fylkesbibl", - "fyresdal", - "gaivuotna", - "galsa", - "gamvik", - "gangaviika", - "gaular", - "gausdal", - "giehtavuoatna", - "gildeskal", - "giske", - "gjemnes", - "gjerdrum", - "gjerstad", - "gjesdal", - "gjovik", - "gloppen", - "gol", - "gran", - "grane", - "granvin", - "gratangen", - "grimstad", - "grong", - "grue", - "gulen", - "guovdageaidnu", - "ha", - "habmer", - "hadsel", - "hagebostad", - "halden", - "halsa", - "hamar", - "hamaroy", - "hammarfeasta", - "hammerfest", - "hapmir", - "haram", - "hareid", - "harstad", - "hasvik", - "hattfjelldal", - "haugesund", - "hedmark", - "hemne", - "hemnes", - "hemsedal", - "herad", - "hitra", - "hjartdal", - "hjelmeland", - "hl", - "hm", - "hobol", - "hof", - "hokksund", - "hol", - "hole", - "holmestrand", - "holtalen", - "honefoss", - "hordaland", - "hornindal", - "horten", - "hoyanger", - "hoylandet", - "hurdal", - "hurum", - "hvaler", - "hyllestad", - "ibestad", - "idrett", - "inderoy", - "iveland", - "ivgu", - "jan-mayen", - "jessheim", - "jevnaker", - "jolster", - "jondal", - "jorpeland", - "kafjord", - "karasjohka", - "karasjok", - "karlsoy", - "karmoy", - "kautokeino", - "kirkenes", - "klabu", - "klepp", - "kommune", - "kongsberg", - "kongsvinger", - "kopervik", - "kraanghke", - "kragero", - "kristiansand", - "kristiansund", - "krodsherad", - "krokstadelva", - "kvafjord", - "kvalsund", - "kvam", - "kvanangen", - "kvinesdal", - "kvinnherad", - "kviteseid", - "kvitsoy", - "laakesvuemie", - "lahppi", - "langevag", - "lardal", - "larvik", - "lavagis", - "lavangen", - "leangaviika", - "lebesby", - "leikanger", - "leirfjord", - "leirvik", - "leka", - "leksvik", - "lenvik", - "lerdal", - "lesja", - "levanger", - "lier", - "lierne", - "lillehammer", - "lillesand", - "lindas", - "lindesnes", - "loabat", - "lodingen", - "lom", - "loppa", - "lorenskog", - "loten", - "lund", - "lunner", - "luroy", - "luster", - "lyngdal", - "lyngen", - "malatvuopmi", - "malselv", - "malvik", - "mandal", - "marker", - "marnardal", - "masfjorden", - "masoy", - "matta-varjjat", - "meland", - "meldal", - "melhus", - "meloy", - "meraker", - "midsund", - "midtre-gauldal", - "mil", - "mjondalen", - "mo-i-rana", - "moareke", - "modalen", - "modum", - "molde", - "more-og-romsdal", - "mosjoen", - "moskenes", - "moss", - "mosvik", - "mr", - "muosat", - "museum", - "naamesjevuemie", - "namdalseid", - "namsos", - "namsskogan", - "nannestad", - "naroy", - "narviika", - "narvik", - "naustdal", - "navuotna", - "nedre-eiker", - "nesna", - "nesodden", - "nesoddtangen", - "nesseby", - "nesset", - "nissedal", - "nittedal", - "nl", - "nord-aurdal", - "nord-fron", - "nord-odal", - "norddal", - "nordkapp", - "nordland", - "nordre-land", - "nordreisa", - "nore-og-uvdal", - "notodden", - "notteroy", - "nt", - "odda", - "of", - "oksnes", - "ol", - "omasvuotna", - "oppdal", - "oppegard", - "orkanger", - "orkdal", - "orland", - "orskog", - "orsta", - "osen", - "oslo", - "osoyro", - "osteroy", - "ostfold", - "ostre-toten", - "overhalla", - "ovre-eiker", - "oyer", - "oygarden", - "oystre-slidre", - "porsanger", - "porsangu", - "porsgrunn", - "priv", - "rade", - "radoy", - "rahkkeravju", - "raholt", - "raisa", - "rakkestad", - "ralingen", - "rana", - "randaberg", - "rauma", - "rendalen", - "rennebu", - "rennesoy", - "rindal", - "ringebu", - "ringerike", 
- "ringsaker", - "risor", - "rissa", - "rl", - "roan", - "rodoy", - "rollag", - "romsa", - "romskog", - "roros", - "rost", - "royken", - "royrvik", - "ruovat", - "rygge", - "salangen", - "salat", - "saltdal", - "samnanger", - "sandefjord", - "sandnes", - "sandnessjoen", - "sandoy", - "sarpsborg", - "sauda", - "sauherad", - "sel", - "selbu", - "selje", - "seljord", - "sf", - "siellak", - "sigdal", - "siljan", - "sirdal", - "skanit", - "skanland", - "skaun", - "skedsmo", - "skedsmokorset", - "ski", - "skien", - "skierva", - "skiptvet", - "skjak", - "skjervoy", - "skodje", - "slattum", - "smola", - "snaase", - "snasa", - "snillfjord", - "snoasa", - "sogndal", - "sogne", - "sokndal", - "sola", - "solund", - "somna", - "sondre-land", - "songdalen", - "sor-aurdal", - "sor-fron", - "sor-odal", - "sor-varanger", - "sorfold", - "sorreisa", - "sortland", - "sorum", - "spjelkavik", - "spydeberg", - "st", - "stange", - "stat", - "stathelle", - "stavanger", - "stavern", - "steigen", - "steinkjer", - "stjordal", - "stjordalshalsen", - "stokke", - "stor-elvdal", - "stord", - "stordal", - "storfjord", - "strand", - "stranda", - "stryn", - "sula", - "suldal", - "sund", - "sunndal", - "surnadal", - "svalbard", - "sveio", - "svelvik", - "sykkylven", - "tana", - "tananger", - "telemark", - "time", - "tingvoll", - "tinn", - "tjeldsund", - "tjome", - "tm", - "tokke", - "tolga", - "tonsberg", - "torsken", - "tr", - "trana", - "tranby", - "tranoy", - "troandin", - "trogstad", - "tromsa", - "tromso", - "trondheim", - "trysil", - "tvedestrand", - "tydal", - "tynset", - "tysfjord", - "tysnes", - "tysvar", - "ullensaker", - "ullensvang", - "ulvik", - "unjarga", - "utsira", - "va", - "vaapste", - "vadso", - "vaga", - "vagan", - "vagsoy", - "vaksdal", - "valle", - "vang", - "vanylven", - "vardo", - "varggat", - "varoy", - "vefsn", - "vega", - "vegarshei", - "vennesla", - "verdal", - "verran", - "vestby", - "vestfold", - "vestnes", - "vestre-slidre", - "vestre-toten", - "vestvagoy", - "vevelstad", - "vf", - "vgs", - "vik", - "vikna", - "vindafjord", - "voagat", - "volda", - "voss", - "vossevangen", - "xn--andy-ira", - "xn--asky-ira", - "xn--aurskog-hland-jnb", - "xn--avery-yua", - "xn--bdddj-mrabd", - "xn--bearalvhki-y4a", - "xn--berlevg-jxa", - "xn--bhcavuotna-s4a", - "xn--bhccavuotna-k7a", - "xn--bidr-5nac", - "xn--bievt-0qa", - "xn--bjarky-fya", - "xn--bjddar-pta", - "xn--blt-elab", - "xn--bmlo-gra", - "xn--bod-2na", - "xn--brnny-wuac", - "xn--brnnysund-m8ac", - "xn--brum-voa", - "xn--btsfjord-9za", - "xn--davvenjrga-y4a", - "xn--dnna-gra", - "xn--drbak-wua", - "xn--dyry-ira", - "xn--eveni-0qa01ga", - "xn--finny-yua", - "xn--fjord-lra", - "xn--fl-zia", - "xn--flor-jra", - "xn--frde-gra", - "xn--frna-woa", - "xn--frya-hra", - "xn--ggaviika-8ya47h", - "xn--gildeskl-g0a", - "xn--givuotna-8ya", - "xn--gjvik-wua", - "xn--gls-elac", - "xn--h-2fa", - "xn--hbmer-xqa", - "xn--hcesuolo-7ya35b", - "xn--hgebostad-g3a", - "xn--hmmrfeasta-s4ac", - "xn--hnefoss-q1a", - "xn--hobl-ira", - "xn--holtlen-hxa", - "xn--hpmir-xqa", - "xn--hyanger-q1a", - "xn--hylandet-54a", - "xn--indery-fya", - "xn--jlster-bya", - "xn--jrpeland-54a", - "xn--karmy-yua", - "xn--kfjord-iua", - "xn--klbu-woa", - "xn--koluokta-7ya57h", - "xn--krager-gya", - "xn--kranghke-b0a", - "xn--krdsherad-m8a", - "xn--krehamn-dxa", - "xn--krjohka-hwab49j", - "xn--ksnes-uua", - "xn--kvfjord-nxa", - "xn--kvitsy-fya", - "xn--kvnangen-k0a", - "xn--l-1fa", - "xn--laheadju-7ya", - "xn--langevg-jxa", - "xn--ldingen-q1a", - "xn--leagaviika-52b", - "xn--lesund-hua", - 
"xn--lgrd-poac", - "xn--lhppi-xqa", - "xn--linds-pra", - "xn--loabt-0qa", - "xn--lrdal-sra", - "xn--lrenskog-54a", - "xn--lt-liac", - "xn--lten-gra", - "xn--lury-ira", - "xn--mely-ira", - "xn--merker-kua", - "xn--mjndalen-64a", - "xn--mlatvuopmi-s4a", - "xn--mli-tla", - "xn--mlselv-iua", - "xn--moreke-jua", - "xn--mosjen-eya", - "xn--mot-tla", - "xn--mre-og-romsdal-qqb", - "xn--msy-ula0h", - "xn--mtta-vrjjat-k7af", - "xn--muost-0qa", - "xn--nmesjevuemie-tcba", - "xn--nry-yla5g", - "xn--nttery-byae", - "xn--nvuotna-hwa", - "xn--oppegrd-ixa", - "xn--ostery-fya", - "xn--osyro-wua", - "xn--porsgu-sta26f", - "xn--rady-ira", - "xn--rdal-poa", - "xn--rde-ula", - "xn--rdy-0nab", - "xn--rennesy-v1a", - "xn--rhkkervju-01af", - "xn--rholt-mra", - "xn--risa-5na", - "xn--risr-ira", - "xn--rland-uua", - "xn--rlingen-mxa", - "xn--rmskog-bya", - "xn--rros-gra", - "xn--rskog-uua", - "xn--rst-0na", - "xn--rsta-fra", - "xn--ryken-vua", - "xn--ryrvik-bya", - "xn--s-1fa", - "xn--sandnessjen-ogb", - "xn--sandy-yua", - "xn--seral-lra", - "xn--sgne-gra", - "xn--skierv-uta", - "xn--skjervy-v1a", - "xn--skjk-soa", - "xn--sknit-yqa", - "xn--sknland-fxa", - "xn--slat-5na", - "xn--slt-elab", - "xn--smla-hra", - "xn--smna-gra", - "xn--snase-nra", - "xn--sndre-land-0cb", - "xn--snes-poa", - "xn--snsa-roa", - "xn--sr-aurdal-l8a", - "xn--sr-fron-q1a", - "xn--sr-odal-q1a", - "xn--sr-varanger-ggb", - "xn--srfold-bya", - "xn--srreisa-q1a", - "xn--srum-gra", - "xn--stfold-9xa", - "xn--stjrdal-s1a", - "xn--stjrdalshalsen-sqb", - "xn--stre-toten-zcb", - "xn--tjme-hra", - "xn--tnsberg-q1a", - "xn--trany-yua", - "xn--trgstad-r1a", - "xn--trna-woa", - "xn--troms-zua", - "xn--tysvr-vra", - "xn--unjrga-rta", - "xn--vads-jra", - "xn--vard-jra", - "xn--vegrshei-c0a", - "xn--vestvgy-ixa6o", - "xn--vg-yiab", - "xn--vgan-qoa", - "xn--vgsy-qoa0j", - "xn--vre-eiker-k8a", - "xn--vrggt-xqad", - "xn--vry-yla5g", - "xn--yer-zna", - "xn--ygarden-p1a", - "xn--ystre-slidre-ujb", - "gs", - "gs", - "nes", - "gs", - "nes", - "gs", - "os", - "valer", - "xn--vler-qoa", - "gs", - "gs", - "os", - "gs", - "heroy", - "sande", - "gs", - "gs", - "bo", - "heroy", - "xn--b-5ga", - "xn--hery-ira", - "gs", - "gs", - "gs", - "gs", - "valer", - "gs", - "gs", - "gs", - "gs", - "bo", - "xn--b-5ga", - "gs", - "gs", - "gs", - "sande", - "gs", - "sande", - "xn--hery-ira", - "xn--vler-qoa", - "biz", - "com", - "edu", - "gov", - "info", - "net", - "org", - "merseine", - "mine", - "nom", - "shacknet", - "ac", - "co", - "cri", - "geek", - "gen", - "govt", - "health", - "iwi", - "kiwi", - "maori", - "mil", - "net", - "nym", - "org", - "parliament", - "school", - "xn--mori-qsa", - "blogspot", - "co", - "com", - "edu", - "gov", - "med", - "museum", - "net", - "org", - "pro", - "homelink", - "barsy", - "accesscam", - "ae", - "amune", - "blogdns", - "blogsite", - "bmoattachments", - "boldlygoingnowhere", - "cable-modem", - "camdvr", - "cdn77", - "cdn77-secure", - "certmgr", - "cloudns", - "collegefan", - "couchpotatofries", - "ddnss", - "diskstation", - "dnsalias", - "dnsdojo", - "doesntexist", - "dontexist", - "doomdns", - "dsmynas", - "duckdns", - "dvrdns", - "dynalias", - "dyndns", - "endofinternet", - "endoftheinternet", - "eu", - "familyds", - "fedorainfracloud", - "fedorapeople", - "fedoraproject", - "freeddns", - "from-me", - "game-host", - "gotdns", - "hepforge", - "hk", - "hobby-site", - "homedns", - "homeftp", - "homelinux", - "homeunix", - "hopto", - "is-a-bruinsfan", - "is-a-candidate", - "is-a-celticsfan", - "is-a-chef", - "is-a-geek", - "is-a-knight", - 
"is-a-linux-user", - "is-a-patsfan", - "is-a-soxfan", - "is-found", - "is-lost", - "is-saved", - "is-very-bad", - "is-very-evil", - "is-very-good", - "is-very-nice", - "is-very-sweet", - "isa-geek", - "js", - "kicks-ass", - "misconfused", - "mlbfan", - "my-firewall", - "myfirewall", - "myftp", - "mysecuritycamera", - "mywire", - "nflfan", - "no-ip", - "pimienta", - "podzone", - "poivron", - "potager", - "read-books", - "readmyblog", - "selfip", - "sellsyourhome", - "servebbs", - "serveftp", - "servegame", - "spdns", - "stuff-4-sale", - "sweetpepper", - "tunk", - "tuxfamily", - "twmail", - "ufcfan", - "us", - "webhop", - "webredirect", - "wmflabs", - "za", - "zapto", - "tele", - "c", - "rsc", - "origin", - "ssl", - "go", - "home", - "al", - "asso", - "at", - "au", - "be", - "bg", - "ca", - "cd", - "ch", - "cn", - "cy", - "cz", - "de", - "dk", - "edu", - "ee", - "es", - "fi", - "fr", - "gr", - "hr", - "hu", - "ie", - "il", - "in", - "int", - "is", - "it", - "jp", - "kr", - "lt", - "lu", - "lv", - "mc", - "me", - "mk", - "mt", - "my", - "net", - "ng", - "nl", - "no", - "nz", - "paris", - "pl", - "pt", - "q-a", - "ro", - "ru", - "se", - "si", - "sk", - "tr", - "uk", - "us", - "cloud", - "nerdpol", - "abo", - "ac", - "com", - "edu", - "gob", - "ing", - "med", - "net", - "nom", - "org", - "sld", - "ybo", - "blogspot", - "com", - "edu", - "gob", - "mil", - "net", - "nom", - "nym", - "org", - "com", - "edu", - "org", - "com", - "edu", - "gov", - "i", - "mil", - "net", - "ngo", - "org", - "1337", - "biz", - "com", - "edu", - "fam", - "gob", - "gok", - "gon", - "gop", - "gos", - "gov", - "info", - "net", - "org", - "web", - "agro", - "aid", - "art", - "atm", - "augustow", - "auto", - "babia-gora", - "bedzin", - "beep", - "beskidy", - "bialowieza", - "bialystok", - "bielawa", - "bieszczady", - "biz", - "boleslawiec", - "bydgoszcz", - "bytom", - "cieszyn", - "co", - "com", - "czeladz", - "czest", - "dlugoleka", - "edu", - "elblag", - "elk", - "gda", - "gdansk", - "gdynia", - "gliwice", - "glogow", - "gmina", - "gniezno", - "gorlice", - "gov", - "grajewo", - "gsm", - "ilawa", - "info", - "jaworzno", - "jelenia-gora", - "jgora", - "kalisz", - "karpacz", - "kartuzy", - "kaszuby", - "katowice", - "kazimierz-dolny", - "kepno", - "ketrzyn", - "klodzko", - "kobierzyce", - "kolobrzeg", - "konin", - "konskowola", - "krakow", - "kutno", - "lapy", - "lebork", - "legnica", - "lezajsk", - "limanowa", - "lomza", - "lowicz", - "lubin", - "lukow", - "mail", - "malbork", - "malopolska", - "mazowsze", - "mazury", - "med", - "media", - "miasta", - "mielec", - "mielno", - "mil", - "mragowo", - "naklo", - "net", - "nieruchomosci", - "nom", - "nowaruda", - "nysa", - "olawa", - "olecko", - "olkusz", - "olsztyn", - "opoczno", - "opole", - "org", - "ostroda", - "ostroleka", - "ostrowiec", - "ostrowwlkp", - "pc", - "pila", - "pisz", - "podhale", - "podlasie", - "polkowice", - "pomorskie", - "pomorze", - "powiat", - "poznan", - "priv", - "prochowice", - "pruszkow", - "przeworsk", - "pulawy", - "radom", - "rawa-maz", - "realestate", - "rel", - "rybnik", - "rzeszow", - "sanok", - "sejny", - "sex", - "shop", - "sklep", - "skoczow", - "slask", - "slupsk", - "sopot", - "sos", - "sosnowiec", - "stalowa-wola", - "starachowice", - "stargard", - "suwalki", - "swidnica", - "swiebodzin", - "swinoujscie", - "szczecin", - "szczytno", - "szkola", - "targi", - "tarnobrzeg", - "tgory", - "tm", - "tourism", - "travel", - "turek", - "turystyka", - "tychy", - "ustka", - "walbrzych", - "warmia", - "warszawa", - "waw", - "wegrow", - "wielun", - 
"wlocl", - "wloclawek", - "wodzislaw", - "wolomin", - "wroc", - "wroclaw", - "zachpomor", - "zagan", - "zakopane", - "zarow", - "zgora", - "zgorzelec", - "ap", - "griw", - "ic", - "is", - "kmpsp", - "konsulat", - "kppsp", - "kwp", - "kwpsp", - "mup", - "mw", - "oirm", - "oum", - "pa", - "pinb", - "piw", - "po", - "psp", - "psse", - "pup", - "rzgw", - "sa", - "sdn", - "sko", - "so", - "sr", - "starostwo", - "ug", - "ugim", - "um", - "umig", - "upow", - "uppo", - "us", - "uw", - "uzs", - "wif", - "wiih", - "winb", - "wios", - "witd", - "wiw", - "wsa", - "wskr", - "wuoz", - "wzmiuw", - "zp", - "co", - "edu", - "gov", - "net", - "org", - "ac", - "biz", - "com", - "edu", - "est", - "gov", - "info", - "isla", - "name", - "net", - "org", - "pro", - "prof", - "aaa", - "aca", - "acct", - "avocat", - "bar", - "cloudns", - "cpa", - "eng", - "jur", - "law", - "med", - "recht", - "com", - "edu", - "gov", - "net", - "org", - "plo", - "sec", - "blogspot", - "com", - "edu", - "gov", - "int", - "net", - "nome", - "nym", - "org", - "publ", - "belau", - "cloudns", - "co", - "ed", - "go", - "ne", - "nom", - "or", - "com", - "coop", - "edu", - "gov", - "mil", - "net", - "org", - "blogspot", - "com", - "edu", - "gov", - "mil", - "name", - "net", - "nom", - "org", - "sch", - "asso", - "blogspot", - "com", - "nom", - "ybo", - "clan", - "arts", - "blogspot", - "com", - "firm", - "info", - "nom", - "nt", - "org", - "rec", - "shop", - "store", - "tm", - "www", - "lima-city", - "myddns", - "webspace", - "ac", - "blogspot", - "co", - "edu", - "gov", - "in", - "nom", - "org", - "ac", - "adygeya", - "bashkiria", - "bir", - "blogspot", - "cbg", - "cldmail", - "com", - "dagestan", - "edu", - "gov", - "grozny", - "int", - "kalmykia", - "kustanai", - "marine", - "mil", - "mordovia", - "msk", - "mytis", - "nalchik", - "net", - "nov", - "org", - "pp", - "pyatigorsk", - "spb", - "test", - "vladikavkaz", - "vladimir", - "hb", - "ac", - "co", - "com", - "edu", - "gouv", - "gov", - "int", - "mil", - "net", - "com", - "edu", - "gov", - "med", - "net", - "org", - "pub", - "sch", - "com", - "edu", - "gov", - "net", - "org", - "com", - "edu", - "gov", - "net", - "org", - "ybo", - "com", - "edu", - "gov", - "info", - "med", - "net", - "org", - "tv", - "a", - "ac", - "b", - "bd", - "blogspot", - "brand", - "c", - "com", - "d", - "e", - "f", - "fh", - "fhsk", - "fhv", - "g", - "h", - "i", - "k", - "komforb", - "kommunalforbund", - "komvux", - "l", - "lanbib", - "m", - "n", - "naturbruksgymn", - "o", - "org", - "p", - "parti", - "pp", - "press", - "r", - "s", - "t", - "tm", - "u", - "w", - "x", - "y", - "z", - "blogspot", - "com", - "edu", - "gov", - "net", - "org", - "per", - "com", - "gov", - "hashbang", - "mil", - "net", - "now", - "org", - "platform", - "wedeploy", - "blogspot", - "nom", - "byen", - "cyon", - "platformsh", - "blogspot", - "nym", - "com", - "edu", - "gov", - "net", - "org", - "art", - "blogspot", - "com", - "edu", - "gouv", - "org", - "perso", - "univ", - "com", - "net", - "org", - "stackspace", - "uber", - "xs4all", - "co", - "com", - "consulado", - "edu", - "embaixada", - "gov", - "mil", - "net", - "org", - "principe", - "saotome", - "store", - "abkhazia", - "adygeya", - "aktyubinsk", - "arkhangelsk", - "armenia", - "ashgabad", - "azerbaijan", - "balashov", - "bashkiria", - "bryansk", - "bukhara", - "chimkent", - "dagestan", - "east-kazakhstan", - "exnet", - "georgia", - "grozny", - "ivanovo", - "jambyl", - "kalmykia", - "kaluga", - "karacol", - "karaganda", - "karelia", - "khakassia", - "krasnodar", - "kurgan", - 
"kustanai", - "lenug", - "mangyshlak", - "mordovia", - "msk", - "murmansk", - "nalchik", - "navoi", - "north-kazakhstan", - "nov", - "nym", - "obninsk", - "penza", - "pokrovsk", - "sochi", - "spb", - "tashkent", - "termez", - "togliatti", - "troitsk", - "tselinograd", - "tula", - "tuva", - "vladikavkaz", - "vladimir", - "vologda", - "barsy", - "com", - "edu", - "gob", - "org", - "red", - "gov", - "nym", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "knightpoint", - "ac", - "co", - "org", - "blogspot", - "ac", - "co", - "go", - "in", - "mi", - "net", - "or", - "ac", - "biz", - "co", - "com", - "edu", - "go", - "gov", - "int", - "mil", - "name", - "net", - "nic", - "org", - "test", - "web", - "gov", - "co", - "com", - "edu", - "gov", - "mil", - "net", - "nom", - "org", - "agrinet", - "com", - "defense", - "edunet", - "ens", - "fin", - "gov", - "ind", - "info", - "intl", - "mincom", - "nat", - "net", - "org", - "perso", - "rnrt", - "rns", - "rnu", - "tourism", - "turen", - "com", - "edu", - "gov", - "mil", - "net", - "org", - "vpnplus", - "av", - "bbs", - "bel", - "biz", - "com", - "dr", - "edu", - "gen", - "gov", - "info", - "k12", - "kep", - "mil", - "name", - "nc", - "net", - "org", - "pol", - "tel", - "tv", - "web", - "blogspot", - "gov", - "ybo", - "aero", - "biz", - "co", - "com", - "coop", - "edu", - "gov", - "info", - "int", - "jobs", - "mobi", - "museum", - "name", - "net", - "org", - "pro", - "travel", - "better-than", - "dyndns", - "on-the-web", - "worse-than", - "blogspot", - "club", - "com", - "ebiz", - "edu", - "game", - "gov", - "idv", - "mil", - "net", - "nym", - "org", - "url", - "xn--czrw28b", - "xn--uc0atv", - "xn--zf0ao64a", - "mymailer", - "ac", - "co", - "go", - "hotel", - "info", - "me", - "mil", - "mobi", - "ne", - "or", - "sc", - "tv", - "biz", - "cc", - "cherkassy", - "cherkasy", - "chernigov", - "chernihiv", - "chernivtsi", - "chernovtsy", - "ck", - "cn", - "co", - "com", - "cr", - "crimea", - "cv", - "dn", - "dnepropetrovsk", - "dnipropetrovsk", - "dominic", - "donetsk", - "dp", - "edu", - "gov", - "if", - "in", - "inf", - "ivano-frankivsk", - "kh", - "kharkiv", - "kharkov", - "kherson", - "khmelnitskiy", - "khmelnytskyi", - "kiev", - "kirovograd", - "km", - "kr", - "krym", - "ks", - "kv", - "kyiv", - "lg", - "lt", - "ltd", - "lugansk", - "lutsk", - "lv", - "lviv", - "mk", - "mykolaiv", - "net", - "nikolaev", - "od", - "odesa", - "odessa", - "org", - "pl", - "poltava", - "pp", - "rivne", - "rovno", - "rv", - "sb", - "sebastopol", - "sevastopol", - "sm", - "sumy", - "te", - "ternopil", - "uz", - "uzhgorod", - "vinnica", - "vinnytsia", - "vn", - "volyn", - "yalta", - "zaporizhzhe", - "zaporizhzhia", - "zhitomir", - "zhytomyr", - "zp", - "zt", - "ac", - "blogspot", - "co", - "com", - "go", - "ne", - "nom", - "or", - "org", - "sc", - "ac", - "co", - "gov", - "ltd", - "me", - "net", - "nhs", - "org", - "plc", - "police", - "sch", - "blogspot", - "no-ip", - "wellbeingzone", - "homeoffice", - "service", - "ak", - "al", - "ar", - "as", - "az", - "ca", - "cloudns", - "co", - "ct", - "dc", - "de", - "dni", - "drud", - "fed", - "fl", - "ga", - "golffan", - "gu", - "hi", - "ia", - "id", - "il", - "in", - "is-by", - "isa", - "kids", - "ks", - "ky", - "la", - "land-4-sale", - "ma", - "md", - "me", - "mi", - "mn", - "mo", - "ms", - "mt", - "nc", - "nd", - "ne", - "nh", - "nj", - "nm", - "noip", - "nsn", - "nv", - "ny", - "oh", - "ok", - "or", - "pa", - "pointto", - "pr", - "ri", - "sc", - "sd", - "stuff-4-sale", - "tn", - "tx", - "ut", - "va", - "vi", - "vt", - "wa", - 
"wi", - "wv", - "wy", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "chtr", - "paroch", - "pvt", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "ann-arbor", - "cc", - "cog", - "dst", - "eaton", - "gen", - "k12", - "lib", - "mus", - "tec", - "washtenaw", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "k12", - "lib", - "cc", - "cc", - "k12", - "lib", - "com", - "edu", - "gub", - "mil", - "net", - "nom", - "org", - "blogspot", - "co", - "com", - "net", - "org", - "com", - "edu", - "gov", - "mil", - "net", - "nom", - "org", - "arts", - "co", - "com", - "e12", - "edu", - "firm", - "gob", - "gov", - "info", - "int", - "mil", - "net", - "org", - "rec", - "store", - "tec", - "web", - "nom", - "co", - "com", - "k12", - "net", - "org", - "ac", - "biz", - "blogspot", - "com", - "edu", - "gov", - "health", - "info", - "int", - "name", - "net", - "org", - "pro", - "com", - "edu", - "net", - "org", - "advisor", - "com", - "dyndns", - "edu", - "gov", - "mypets", - "net", - "org", - "xn--80au", - "xn--90azh", - "xn--c1avg", - "xn--d1at", - "xn--o1ac", - "xn--o1ach", - "xn--12c1fe0br", - "xn--12cfi8ixb8l", - "xn--12co0c3b4eva", - "xn--h3cuzk1di", - "xn--m3ch0j3a", - "xn--o3cyx2a", - "blogsite", - "fhapp", - "ac", - "agric", - "alt", - "co", - "edu", - "gov", - "grondar", - "law", - "mil", - "net", - "ngo", - "nis", - "nom", - "org", - "school", - "tm", - "web", - "blogspot", - "ac", - "biz", - "co", - "com", - "edu", - "gov", - "info", - "mil", - "net", - "org", - "sch", - "lima", - "triton", - "ac", - "co", - "gov", - "mil", - "org", -} diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go deleted file mode 100644 index e6bfa39e..00000000 --- a/vendor/golang.org/x/net/route/address.go +++ /dev/null @@ -1,425 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package route - -import "runtime" - -// An Addr represents an address associated with packet routing. -type Addr interface { - // Family returns an address family. - Family() int -} - -// A LinkAddr represents a link-layer address. 
-type LinkAddr struct { - Index int // interface index when attached - Name string // interface name when attached - Addr []byte // link-layer address when attached -} - -// Family implements the Family method of Addr interface. -func (a *LinkAddr) Family() int { return sysAF_LINK } - -func (a *LinkAddr) lenAndSpace() (int, int) { - l := 8 + len(a.Name) + len(a.Addr) - return l, roundup(l) -} - -func (a *LinkAddr) marshal(b []byte) (int, error) { - l, ll := a.lenAndSpace() - if len(b) < ll { - return 0, errShortBuffer - } - nlen, alen := len(a.Name), len(a.Addr) - if nlen > 255 || alen > 255 { - return 0, errInvalidAddr - } - b[0] = byte(l) - b[1] = sysAF_LINK - if a.Index > 0 { - nativeEndian.PutUint16(b[2:4], uint16(a.Index)) - } - data := b[8:] - if nlen > 0 { - b[5] = byte(nlen) - copy(data[:nlen], a.Addr) - data = data[nlen:] - } - if alen > 0 { - b[6] = byte(alen) - copy(data[:alen], a.Name) - data = data[alen:] - } - return ll, nil -} - -func parseLinkAddr(b []byte) (Addr, error) { - if len(b) < 8 { - return nil, errInvalidAddr - } - _, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:]) - if err != nil { - return nil, err - } - a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4])) - return a, nil -} - -// parseKernelLinkAddr parses b as a link-layer address in -// conventional BSD kernel form. -func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) { - // The encoding looks like the following: - // +----------------------------+ - // | Type (1 octet) | - // +----------------------------+ - // | Name length (1 octet) | - // +----------------------------+ - // | Address length (1 octet) | - // +----------------------------+ - // | Selector length (1 octet) | - // +----------------------------+ - // | Data (variable) | - // +----------------------------+ - // - // On some platforms, all-bit-one of length field means "don't - // care". - nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) - if nlen == 0xff { - nlen = 0 - } - if alen == 0xff { - alen = 0 - } - if slen == 0xff { - slen = 0 - } - l := 4 + nlen + alen + slen - if len(b) < l { - return 0, nil, errInvalidAddr - } - data := b[4:] - var name string - var addr []byte - if nlen > 0 { - name = string(data[:nlen]) - data = data[nlen:] - } - if alen > 0 { - addr = data[:alen] - data = data[alen:] - } - return l, &LinkAddr{Name: name, Addr: addr}, nil -} - -// An Inet4Addr represents an internet address for IPv4. -type Inet4Addr struct { - IP [4]byte // IP address -} - -// Family implements the Family method of Addr interface. -func (a *Inet4Addr) Family() int { return sysAF_INET } - -func (a *Inet4Addr) lenAndSpace() (int, int) { - return sizeofSockaddrInet, roundup(sizeofSockaddrInet) -} - -func (a *Inet4Addr) marshal(b []byte) (int, error) { - l, ll := a.lenAndSpace() - if len(b) < ll { - return 0, errShortBuffer - } - b[0] = byte(l) - b[1] = sysAF_INET - copy(b[4:8], a.IP[:]) - return ll, nil -} - -// An Inet6Addr represents an internet address for IPv6. -type Inet6Addr struct { - IP [16]byte // IP address - ZoneID int // zone identifier -} - -// Family implements the Family method of Addr interface. 
-func (a *Inet6Addr) Family() int { return sysAF_INET6 } - -func (a *Inet6Addr) lenAndSpace() (int, int) { - return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6) -} - -func (a *Inet6Addr) marshal(b []byte) (int, error) { - l, ll := a.lenAndSpace() - if len(b) < ll { - return 0, errShortBuffer - } - b[0] = byte(l) - b[1] = sysAF_INET6 - copy(b[8:24], a.IP[:]) - if a.ZoneID > 0 { - nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID)) - } - return ll, nil -} - -// parseInetAddr parses b as an internet address for IPv4 or IPv6. -func parseInetAddr(af int, b []byte) (Addr, error) { - switch af { - case sysAF_INET: - if len(b) < sizeofSockaddrInet { - return nil, errInvalidAddr - } - a := &Inet4Addr{} - copy(a.IP[:], b[4:8]) - return a, nil - case sysAF_INET6: - if len(b) < sizeofSockaddrInet6 { - return nil, errInvalidAddr - } - a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))} - copy(a.IP[:], b[8:24]) - if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) { - // KAME based IPv6 protocol stack usually - // embeds the interface index in the - // interface-local or link-local address as - // the kernel-internal form. - id := int(bigEndian.Uint16(a.IP[2:4])) - if id != 0 { - a.ZoneID = id - a.IP[2], a.IP[3] = 0, 0 - } - } - return a, nil - default: - return nil, errInvalidAddr - } -} - -// parseKernelInetAddr parses b as an internet address in conventional -// BSD kernel form. -func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { - // The encoding looks similar to the NLRI encoding. - // +----------------------------+ - // | Length (1 octet) | - // +----------------------------+ - // | Address prefix (variable) | - // +----------------------------+ - // - // The differences between the kernel form and the NLRI - // encoding are: - // - // - The length field of the kernel form indicates the prefix - // length in bytes, not in bits - // - // - In the kernel form, zero value of the length field - // doesn't mean 0.0.0.0/0 or ::/0 - // - // - The kernel form appends leading bytes to the prefix field - // to make the tuple to be conformed with - // the routing message boundary - l := int(b[0]) - if runtime.GOOS == "darwin" { - // On Darwn, an address in the kernel form is also - // used as a message filler. - if l == 0 || len(b) > roundup(l) { - l = roundup(l) - } - } else { - l = roundup(l) - } - if len(b) < l { - return 0, nil, errInvalidAddr - } - // Don't reorder case expressions. - // The case expressions for IPv6 must come first. - const ( - off4 = 4 // offset of in_addr - off6 = 8 // offset of in6_addr - ) - switch { - case b[0] == sizeofSockaddrInet6: - a := &Inet6Addr{} - copy(a.IP[:], b[off6:off6+16]) - return int(b[0]), a, nil - case af == sysAF_INET6: - a := &Inet6Addr{} - if l-1 < off6 { - copy(a.IP[:], b[1:l]) - } else { - copy(a.IP[:], b[l-off6:l]) - } - return int(b[0]), a, nil - case b[0] == sizeofSockaddrInet: - a := &Inet4Addr{} - copy(a.IP[:], b[off4:off4+4]) - return int(b[0]), a, nil - default: // an old fashion, AF_UNSPEC or unknown means AF_INET - a := &Inet4Addr{} - if l-1 < off4 { - copy(a.IP[:], b[1:l]) - } else { - copy(a.IP[:], b[l-off4:l]) - } - return int(b[0]), a, nil - } -} - -// A DefaultAddr represents an address of various operating -// system-specific features. -type DefaultAddr struct { - af int - Raw []byte // raw format of address -} - -// Family implements the Family method of Addr interface. 
-func (a *DefaultAddr) Family() int { return a.af } - -func (a *DefaultAddr) lenAndSpace() (int, int) { - l := len(a.Raw) - return l, roundup(l) -} - -func (a *DefaultAddr) marshal(b []byte) (int, error) { - l, ll := a.lenAndSpace() - if len(b) < ll { - return 0, errShortBuffer - } - if l > 255 { - return 0, errInvalidAddr - } - b[1] = byte(l) - copy(b[:l], a.Raw) - return ll, nil -} - -func parseDefaultAddr(b []byte) (Addr, error) { - if len(b) < 2 || len(b) < int(b[0]) { - return nil, errInvalidAddr - } - a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]} - return a, nil -} - -func addrsSpace(as []Addr) int { - var l int - for _, a := range as { - switch a := a.(type) { - case *LinkAddr: - _, ll := a.lenAndSpace() - l += ll - case *Inet4Addr: - _, ll := a.lenAndSpace() - l += ll - case *Inet6Addr: - _, ll := a.lenAndSpace() - l += ll - case *DefaultAddr: - _, ll := a.lenAndSpace() - l += ll - } - } - return l -} - -// marshalAddrs marshals as and returns a bitmap indicating which -// address is stored in b. -func marshalAddrs(b []byte, as []Addr) (uint, error) { - var attrs uint - for i, a := range as { - switch a := a.(type) { - case *LinkAddr: - l, err := a.marshal(b) - if err != nil { - return 0, err - } - b = b[l:] - attrs |= 1 << uint(i) - case *Inet4Addr: - l, err := a.marshal(b) - if err != nil { - return 0, err - } - b = b[l:] - attrs |= 1 << uint(i) - case *Inet6Addr: - l, err := a.marshal(b) - if err != nil { - return 0, err - } - b = b[l:] - attrs |= 1 << uint(i) - case *DefaultAddr: - l, err := a.marshal(b) - if err != nil { - return 0, err - } - b = b[l:] - attrs |= 1 << uint(i) - } - } - return attrs, nil -} - -func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) { - var as [sysRTAX_MAX]Addr - af := int(sysAF_UNSPEC) - for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ { - if attrs&(1<> 8) -} - -func (binaryLittleEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func (binaryLittleEndian) PutUint32(b []byte, v uint32) { - _ = b[3] // early bounds check to guarantee safety of writes below - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) -} - -func (binaryLittleEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -type binaryBigEndian struct{} - -func (binaryBigEndian) Uint16(b []byte) uint16 { - _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 - return uint16(b[1]) | uint16(b[0])<<8 -} - -func (binaryBigEndian) PutUint16(b []byte, v uint16) { - _ = b[1] // early bounds check to guarantee safety of writes below - b[0] = byte(v >> 8) - b[1] = byte(v) -} - -func (binaryBigEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -} - -func (binaryBigEndian) PutUint32(b []byte, v uint32) { - _ = b[3] // early bounds check to guarantee safety of writes below - b[0] = byte(v >> 24) - b[1] = byte(v >> 16) - b[2] = byte(v >> 8) - b[3] = byte(v) -} - -func (binaryBigEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[7]) | 
uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 -} diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go deleted file mode 100644 index e7716442..00000000 --- a/vendor/golang.org/x/net/route/defs_darwin.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package route - -/* -#include -#include - -#include -#include -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_ROUTE = C.AF_ROUTE - sysAF_LINK = C.AF_LINK - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW - - sysNET_RT_DUMP = C.NET_RT_DUMP - sysNET_RT_FLAGS = C.NET_RT_FLAGS - sysNET_RT_IFLIST = C.NET_RT_IFLIST - sysNET_RT_STAT = C.NET_RT_STAT - sysNET_RT_TRASH = C.NET_RT_TRASH - sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2 - sysNET_RT_DUMP2 = C.NET_RT_DUMP2 - sysNET_RT_MAXID = C.NET_RT_MAXID -) - -const ( - sysCTL_MAXNAME = C.CTL_MAXNAME - - sysCTL_UNSPEC = C.CTL_UNSPEC - sysCTL_KERN = C.CTL_KERN - sysCTL_VM = C.CTL_VM - sysCTL_VFS = C.CTL_VFS - sysCTL_NET = C.CTL_NET - sysCTL_DEBUG = C.CTL_DEBUG - sysCTL_HW = C.CTL_HW - sysCTL_MACHDEP = C.CTL_MACHDEP - sysCTL_USER = C.CTL_USER - sysCTL_MAXID = C.CTL_MAXID -) - -const ( - sysRTM_VERSION = C.RTM_VERSION - - sysRTM_ADD = C.RTM_ADD - sysRTM_DELETE = C.RTM_DELETE - sysRTM_CHANGE = C.RTM_CHANGE - sysRTM_GET = C.RTM_GET - sysRTM_LOSING = C.RTM_LOSING - sysRTM_REDIRECT = C.RTM_REDIRECT - sysRTM_MISS = C.RTM_MISS - sysRTM_LOCK = C.RTM_LOCK - sysRTM_OLDADD = C.RTM_OLDADD - sysRTM_OLDDEL = C.RTM_OLDDEL - sysRTM_RESOLVE = C.RTM_RESOLVE - sysRTM_NEWADDR = C.RTM_NEWADDR - sysRTM_DELADDR = C.RTM_DELADDR - sysRTM_IFINFO = C.RTM_IFINFO - sysRTM_NEWMADDR = C.RTM_NEWMADDR - sysRTM_DELMADDR = C.RTM_DELMADDR - sysRTM_IFINFO2 = C.RTM_IFINFO2 - sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2 - sysRTM_GET2 = C.RTM_GET2 - - sysRTA_DST = C.RTA_DST - sysRTA_GATEWAY = C.RTA_GATEWAY - sysRTA_NETMASK = C.RTA_NETMASK - sysRTA_GENMASK = C.RTA_GENMASK - sysRTA_IFP = C.RTA_IFP - sysRTA_IFA = C.RTA_IFA - sysRTA_AUTHOR = C.RTA_AUTHOR - sysRTA_BRD = C.RTA_BRD - - sysRTAX_DST = C.RTAX_DST - sysRTAX_GATEWAY = C.RTAX_GATEWAY - sysRTAX_NETMASK = C.RTAX_NETMASK - sysRTAX_GENMASK = C.RTAX_GENMASK - sysRTAX_IFP = C.RTAX_IFP - sysRTAX_IFA = C.RTAX_IFA - sysRTAX_AUTHOR = C.RTAX_AUTHOR - sysRTAX_BRD = C.RTAX_BRD - sysRTAX_MAX = C.RTAX_MAX -) - -const ( - sizeofIfMsghdrDarwin15 = C.sizeof_struct_if_msghdr - sizeofIfaMsghdrDarwin15 = C.sizeof_struct_ifa_msghdr - sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr - sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2 - sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2 - sizeofIfDataDarwin15 = C.sizeof_struct_if_data - sizeofIfData64Darwin15 = C.sizeof_struct_if_data64 - - sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr - sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2 - sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go deleted file mode 100644 index dd31de26..00000000 --- a/vendor/golang.org/x/net/route/defs_dragonfly.go +++ /dev/null @@ -1,113 +0,0 
@@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package route - -/* -#include -#include - -#include -#include -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_ROUTE = C.AF_ROUTE - sysAF_LINK = C.AF_LINK - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW - - sysNET_RT_DUMP = C.NET_RT_DUMP - sysNET_RT_FLAGS = C.NET_RT_FLAGS - sysNET_RT_IFLIST = C.NET_RT_IFLIST - sysNET_RT_MAXID = C.NET_RT_MAXID -) - -const ( - sysCTL_MAXNAME = C.CTL_MAXNAME - - sysCTL_UNSPEC = C.CTL_UNSPEC - sysCTL_KERN = C.CTL_KERN - sysCTL_VM = C.CTL_VM - sysCTL_VFS = C.CTL_VFS - sysCTL_NET = C.CTL_NET - sysCTL_DEBUG = C.CTL_DEBUG - sysCTL_HW = C.CTL_HW - sysCTL_MACHDEP = C.CTL_MACHDEP - sysCTL_USER = C.CTL_USER - sysCTL_P1003_1B = C.CTL_P1003_1B - sysCTL_LWKT = C.CTL_LWKT - sysCTL_MAXID = C.CTL_MAXID -) - -const ( - sysRTM_VERSION = C.RTM_VERSION - - sysRTM_ADD = C.RTM_ADD - sysRTM_DELETE = C.RTM_DELETE - sysRTM_CHANGE = C.RTM_CHANGE - sysRTM_GET = C.RTM_GET - sysRTM_LOSING = C.RTM_LOSING - sysRTM_REDIRECT = C.RTM_REDIRECT - sysRTM_MISS = C.RTM_MISS - sysRTM_LOCK = C.RTM_LOCK - sysRTM_OLDADD = C.RTM_OLDADD - sysRTM_OLDDEL = C.RTM_OLDDEL - sysRTM_RESOLVE = C.RTM_RESOLVE - sysRTM_NEWADDR = C.RTM_NEWADDR - sysRTM_DELADDR = C.RTM_DELADDR - sysRTM_IFINFO = C.RTM_IFINFO - sysRTM_NEWMADDR = C.RTM_NEWMADDR - sysRTM_DELMADDR = C.RTM_DELMADDR - sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE - sysRTM_IEEE80211 = C.RTM_IEEE80211 - - sysRTA_DST = C.RTA_DST - sysRTA_GATEWAY = C.RTA_GATEWAY - sysRTA_NETMASK = C.RTA_NETMASK - sysRTA_GENMASK = C.RTA_GENMASK - sysRTA_IFP = C.RTA_IFP - sysRTA_IFA = C.RTA_IFA - sysRTA_AUTHOR = C.RTA_AUTHOR - sysRTA_BRD = C.RTA_BRD - sysRTA_MPLS1 = C.RTA_MPLS1 - sysRTA_MPLS2 = C.RTA_MPLS2 - sysRTA_MPLS3 = C.RTA_MPLS3 - - sysRTAX_DST = C.RTAX_DST - sysRTAX_GATEWAY = C.RTAX_GATEWAY - sysRTAX_NETMASK = C.RTAX_NETMASK - sysRTAX_GENMASK = C.RTAX_GENMASK - sysRTAX_IFP = C.RTAX_IFP - sysRTAX_IFA = C.RTAX_IFA - sysRTAX_AUTHOR = C.RTAX_AUTHOR - sysRTAX_BRD = C.RTAX_BRD - sysRTAX_MPLS1 = C.RTAX_MPLS1 - sysRTAX_MPLS2 = C.RTAX_MPLS2 - sysRTAX_MPLS3 = C.RTAX_MPLS3 - sysRTAX_MAX = C.RTAX_MAX -) - -const ( - sizeofIfMsghdrDragonFlyBSD4 = C.sizeof_struct_if_msghdr - sizeofIfaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifa_msghdr - sizeofIfmaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifma_msghdr - sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr - - sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr - sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go deleted file mode 100644 index d95594d8..00000000 --- a/vendor/golang.org/x/net/route/defs_freebsd.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package route - -/* -#include -#include - -#include -#include -#include - -#include - -struct if_data_freebsd7 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; - time_t __ifi_epoch; - struct timeval __ifi_lastchange; -}; - -struct if_data_freebsd8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; - time_t __ifi_epoch; - struct timeval __ifi_lastchange; -}; - -struct if_data_freebsd9 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; - time_t __ifi_epoch; - struct timeval __ifi_lastchange; -}; - -struct if_data_freebsd10 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_vhid; - u_char ifi_baudrate_pf; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - uint64_t ifi_hwassist; - time_t __ifi_epoch; - struct timeval __ifi_lastchange; -}; - -struct if_data_freebsd11 { - uint8_t ifi_type; - uint8_t ifi_physical; - uint8_t ifi_addrlen; - uint8_t ifi_hdrlen; - uint8_t ifi_link_state; - uint8_t ifi_vhid; - uint16_t ifi_datalen; - uint32_t ifi_mtu; - uint32_t ifi_metric; - uint64_t ifi_baudrate; - uint64_t ifi_ipackets; - uint64_t ifi_ierrors; - uint64_t ifi_opackets; - uint64_t ifi_oerrors; - uint64_t ifi_collisions; - uint64_t ifi_ibytes; - uint64_t ifi_obytes; - uint64_t ifi_imcasts; - uint64_t ifi_omcasts; - uint64_t ifi_iqdrops; - uint64_t ifi_oqdrops; - uint64_t ifi_noproto; - uint64_t ifi_hwassist; - union { - time_t tt; - uint64_t ph; - } __ifi_epoch; - union { - struct timeval tv; - struct { - uint64_t ph1; - uint64_t ph2; - } ph; - } __ifi_lastchange; -}; - -struct if_msghdr_freebsd7 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data_freebsd7 ifm_data; -}; - -struct if_msghdr_freebsd8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; 
- int ifm_flags; - u_short ifm_index; - struct if_data_freebsd8 ifm_data; -}; - -struct if_msghdr_freebsd9 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data_freebsd9 ifm_data; -}; - -struct if_msghdr_freebsd10 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data_freebsd10 ifm_data; -}; - -struct if_msghdr_freebsd11 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data_freebsd11 ifm_data; -}; -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_ROUTE = C.AF_ROUTE - sysAF_LINK = C.AF_LINK - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW - - sysNET_RT_DUMP = C.NET_RT_DUMP - sysNET_RT_FLAGS = C.NET_RT_FLAGS - sysNET_RT_IFLIST = C.NET_RT_IFLIST - sysNET_RT_IFMALIST = C.NET_RT_IFMALIST - sysNET_RT_IFLISTL = C.NET_RT_IFLISTL -) - -const ( - sysCTL_MAXNAME = C.CTL_MAXNAME - - sysCTL_UNSPEC = C.CTL_UNSPEC - sysCTL_KERN = C.CTL_KERN - sysCTL_VM = C.CTL_VM - sysCTL_VFS = C.CTL_VFS - sysCTL_NET = C.CTL_NET - sysCTL_DEBUG = C.CTL_DEBUG - sysCTL_HW = C.CTL_HW - sysCTL_MACHDEP = C.CTL_MACHDEP - sysCTL_USER = C.CTL_USER - sysCTL_P1003_1B = C.CTL_P1003_1B -) - -const ( - sysRTM_VERSION = C.RTM_VERSION - - sysRTM_ADD = C.RTM_ADD - sysRTM_DELETE = C.RTM_DELETE - sysRTM_CHANGE = C.RTM_CHANGE - sysRTM_GET = C.RTM_GET - sysRTM_LOSING = C.RTM_LOSING - sysRTM_REDIRECT = C.RTM_REDIRECT - sysRTM_MISS = C.RTM_MISS - sysRTM_LOCK = C.RTM_LOCK - sysRTM_RESOLVE = C.RTM_RESOLVE - sysRTM_NEWADDR = C.RTM_NEWADDR - sysRTM_DELADDR = C.RTM_DELADDR - sysRTM_IFINFO = C.RTM_IFINFO - sysRTM_NEWMADDR = C.RTM_NEWMADDR - sysRTM_DELMADDR = C.RTM_DELMADDR - sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE - sysRTM_IEEE80211 = C.RTM_IEEE80211 - - sysRTA_DST = C.RTA_DST - sysRTA_GATEWAY = C.RTA_GATEWAY - sysRTA_NETMASK = C.RTA_NETMASK - sysRTA_GENMASK = C.RTA_GENMASK - sysRTA_IFP = C.RTA_IFP - sysRTA_IFA = C.RTA_IFA - sysRTA_AUTHOR = C.RTA_AUTHOR - sysRTA_BRD = C.RTA_BRD - - sysRTAX_DST = C.RTAX_DST - sysRTAX_GATEWAY = C.RTAX_GATEWAY - sysRTAX_NETMASK = C.RTAX_NETMASK - sysRTAX_GENMASK = C.RTAX_GENMASK - sysRTAX_IFP = C.RTAX_IFP - sysRTAX_IFA = C.RTAX_IFA - sysRTAX_AUTHOR = C.RTAX_AUTHOR - sysRTAX_BRD = C.RTAX_BRD - sysRTAX_MAX = C.RTAX_MAX -) - -const ( - sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl - sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr - sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl - sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr - sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr - - sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr - sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics - - sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7 - sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8 - sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9 - sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10 - sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11 - - sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7 - sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8 - sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9 - sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10 - sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11 - - sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl - sizeofIfaMsghdrFreeBSD10Emu = 
C.sizeof_struct_ifa_msghdr - sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl - sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr - sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr - - sizeofRtMsghdrFreeBSD10Emu = C.sizeof_struct_rt_msghdr - sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics - - sizeofIfMsghdrFreeBSD7Emu = C.sizeof_struct_if_msghdr_freebsd7 - sizeofIfMsghdrFreeBSD8Emu = C.sizeof_struct_if_msghdr_freebsd8 - sizeofIfMsghdrFreeBSD9Emu = C.sizeof_struct_if_msghdr_freebsd9 - sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10 - sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11 - - sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7 - sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8 - sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9 - sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10 - sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11 - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go deleted file mode 100644 index b0abd549..00000000 --- a/vendor/golang.org/x/net/route/defs_netbsd.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package route - -/* -#include -#include - -#include -#include -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_ROUTE = C.AF_ROUTE - sysAF_LINK = C.AF_LINK - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW - - sysNET_RT_DUMP = C.NET_RT_DUMP - sysNET_RT_FLAGS = C.NET_RT_FLAGS - sysNET_RT_IFLIST = C.NET_RT_IFLIST - sysNET_RT_MAXID = C.NET_RT_MAXID -) - -const ( - sysCTL_MAXNAME = C.CTL_MAXNAME - - sysCTL_UNSPEC = C.CTL_UNSPEC - sysCTL_KERN = C.CTL_KERN - sysCTL_VM = C.CTL_VM - sysCTL_VFS = C.CTL_VFS - sysCTL_NET = C.CTL_NET - sysCTL_DEBUG = C.CTL_DEBUG - sysCTL_HW = C.CTL_HW - sysCTL_MACHDEP = C.CTL_MACHDEP - sysCTL_USER = C.CTL_USER - sysCTL_DDB = C.CTL_DDB - sysCTL_PROC = C.CTL_PROC - sysCTL_VENDOR = C.CTL_VENDOR - sysCTL_EMUL = C.CTL_EMUL - sysCTL_SECURITY = C.CTL_SECURITY - sysCTL_MAXID = C.CTL_MAXID -) - -const ( - sysRTM_VERSION = C.RTM_VERSION - - sysRTM_ADD = C.RTM_ADD - sysRTM_DELETE = C.RTM_DELETE - sysRTM_CHANGE = C.RTM_CHANGE - sysRTM_GET = C.RTM_GET - sysRTM_LOSING = C.RTM_LOSING - sysRTM_REDIRECT = C.RTM_REDIRECT - sysRTM_MISS = C.RTM_MISS - sysRTM_LOCK = C.RTM_LOCK - sysRTM_OLDADD = C.RTM_OLDADD - sysRTM_OLDDEL = C.RTM_OLDDEL - sysRTM_RESOLVE = C.RTM_RESOLVE - sysRTM_NEWADDR = C.RTM_NEWADDR - sysRTM_DELADDR = C.RTM_DELADDR - sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE - sysRTM_IEEE80211 = C.RTM_IEEE80211 - sysRTM_SETGATE = C.RTM_SETGATE - sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD - sysRTM_IFINFO = C.RTM_IFINFO - sysRTM_CHGADDR = C.RTM_CHGADDR - - sysRTA_DST = C.RTA_DST - sysRTA_GATEWAY = C.RTA_GATEWAY - sysRTA_NETMASK = C.RTA_NETMASK - sysRTA_GENMASK = C.RTA_GENMASK - sysRTA_IFP = C.RTA_IFP - sysRTA_IFA = C.RTA_IFA - sysRTA_AUTHOR = C.RTA_AUTHOR - sysRTA_BRD = C.RTA_BRD - sysRTA_TAG = C.RTA_TAG - - sysRTAX_DST = C.RTAX_DST - sysRTAX_GATEWAY = C.RTAX_GATEWAY - sysRTAX_NETMASK = C.RTAX_NETMASK - sysRTAX_GENMASK = C.RTAX_GENMASK - sysRTAX_IFP = C.RTAX_IFP - sysRTAX_IFA = C.RTAX_IFA - 
sysRTAX_AUTHOR = C.RTAX_AUTHOR - sysRTAX_BRD = C.RTAX_BRD - sysRTAX_TAG = C.RTAX_TAG - sysRTAX_MAX = C.RTAX_MAX -) - -const ( - sizeofIfMsghdrNetBSD7 = C.sizeof_struct_if_msghdr - sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr - sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr - - sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr - sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go deleted file mode 100644 index 173bb5d5..00000000 --- a/vendor/golang.org/x/net/route/defs_openbsd.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package route - -/* -#include -#include - -#include -#include -#include - -#include -*/ -import "C" - -const ( - sysAF_UNSPEC = C.AF_UNSPEC - sysAF_INET = C.AF_INET - sysAF_ROUTE = C.AF_ROUTE - sysAF_LINK = C.AF_LINK - sysAF_INET6 = C.AF_INET6 - - sysSOCK_RAW = C.SOCK_RAW - - sysNET_RT_DUMP = C.NET_RT_DUMP - sysNET_RT_FLAGS = C.NET_RT_FLAGS - sysNET_RT_IFLIST = C.NET_RT_IFLIST - sysNET_RT_STATS = C.NET_RT_STATS - sysNET_RT_TABLE = C.NET_RT_TABLE - sysNET_RT_IFNAMES = C.NET_RT_IFNAMES - sysNET_RT_MAXID = C.NET_RT_MAXID -) - -const ( - sysCTL_MAXNAME = C.CTL_MAXNAME - - sysCTL_UNSPEC = C.CTL_UNSPEC - sysCTL_KERN = C.CTL_KERN - sysCTL_VM = C.CTL_VM - sysCTL_FS = C.CTL_FS - sysCTL_NET = C.CTL_NET - sysCTL_DEBUG = C.CTL_DEBUG - sysCTL_HW = C.CTL_HW - sysCTL_MACHDEP = C.CTL_MACHDEP - sysCTL_DDB = C.CTL_DDB - sysCTL_VFS = C.CTL_VFS - sysCTL_MAXID = C.CTL_MAXID -) - -const ( - sysRTM_VERSION = C.RTM_VERSION - - sysRTM_ADD = C.RTM_ADD - sysRTM_DELETE = C.RTM_DELETE - sysRTM_CHANGE = C.RTM_CHANGE - sysRTM_GET = C.RTM_GET - sysRTM_LOSING = C.RTM_LOSING - sysRTM_REDIRECT = C.RTM_REDIRECT - sysRTM_MISS = C.RTM_MISS - sysRTM_LOCK = C.RTM_LOCK - sysRTM_RESOLVE = C.RTM_RESOLVE - sysRTM_NEWADDR = C.RTM_NEWADDR - sysRTM_DELADDR = C.RTM_DELADDR - sysRTM_IFINFO = C.RTM_IFINFO - sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE - sysRTM_DESYNC = C.RTM_DESYNC - sysRTM_INVALIDATE = C.RTM_INVALIDATE - sysRTM_BFD = C.RTM_BFD - sysRTM_PROPOSAL = C.RTM_PROPOSAL - - sysRTA_DST = C.RTA_DST - sysRTA_GATEWAY = C.RTA_GATEWAY - sysRTA_NETMASK = C.RTA_NETMASK - sysRTA_GENMASK = C.RTA_GENMASK - sysRTA_IFP = C.RTA_IFP - sysRTA_IFA = C.RTA_IFA - sysRTA_AUTHOR = C.RTA_AUTHOR - sysRTA_BRD = C.RTA_BRD - sysRTA_SRC = C.RTA_SRC - sysRTA_SRCMASK = C.RTA_SRCMASK - sysRTA_LABEL = C.RTA_LABEL - sysRTA_BFD = C.RTA_BFD - sysRTA_DNS = C.RTA_DNS - sysRTA_STATIC = C.RTA_STATIC - sysRTA_SEARCH = C.RTA_SEARCH - - sysRTAX_DST = C.RTAX_DST - sysRTAX_GATEWAY = C.RTAX_GATEWAY - sysRTAX_NETMASK = C.RTAX_NETMASK - sysRTAX_GENMASK = C.RTAX_GENMASK - sysRTAX_IFP = C.RTAX_IFP - sysRTAX_IFA = C.RTAX_IFA - sysRTAX_AUTHOR = C.RTAX_AUTHOR - sysRTAX_BRD = C.RTAX_BRD - sysRTAX_SRC = C.RTAX_SRC - sysRTAX_SRCMASK = C.RTAX_SRCMASK - sysRTAX_LABEL = C.RTAX_LABEL - sysRTAX_BFD = C.RTAX_BFD - sysRTAX_DNS = C.RTAX_DNS - sysRTAX_STATIC = C.RTAX_STATIC - sysRTAX_SEARCH = C.RTAX_SEARCH - sysRTAX_MAX = C.RTAX_MAX -) - -const ( - sizeofRtMsghdr = C.sizeof_struct_rt_msghdr - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - 
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/route/interface.go b/vendor/golang.org/x/net/route/interface.go deleted file mode 100644 index 854906d9..00000000 --- a/vendor/golang.org/x/net/route/interface.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package route - -// An InterfaceMessage represents an interface message. -type InterfaceMessage struct { - Version int // message version - Type int // message type - Flags int // interface flags - Index int // interface index - Name string // interface name - Addrs []Addr // addresses - - extOff int // offset of header extension - raw []byte // raw message -} - -// An InterfaceAddrMessage represents an interface address message. -type InterfaceAddrMessage struct { - Version int // message version - Type int // message type - Flags int // interface flags - Index int // interface index - Addrs []Addr // addresses - - raw []byte // raw message -} - -// Sys implements the Sys method of Message interface. -func (m *InterfaceAddrMessage) Sys() []Sys { return nil } - -// An InterfaceMulticastAddrMessage represents an interface multicast -// address message. -type InterfaceMulticastAddrMessage struct { - Version int // message version - Type int // messsage type - Flags int // interface flags - Index int // interface index - Addrs []Addr // addresses - - raw []byte // raw message -} - -// Sys implements the Sys method of Message interface. -func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil } - -// An InterfaceAnnounceMessage represents an interface announcement -// message. -type InterfaceAnnounceMessage struct { - Version int // message version - Type int // message type - Index int // interface index - Name string // interface name - What int // what type of announcement - - raw []byte // raw message -} - -// Sys implements the Sys method of Message interface. -func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil } diff --git a/vendor/golang.org/x/net/route/interface_announce.go b/vendor/golang.org/x/net/route/interface_announce.go deleted file mode 100644 index 520d657b..00000000 --- a/vendor/golang.org/x/net/route/interface_announce.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build dragonfly freebsd netbsd - -package route - -func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < w.bodyOff { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - m := &InterfaceAnnounceMessage{ - Version: int(b[2]), - Type: int(b[3]), - Index: int(nativeEndian.Uint16(b[4:6])), - What: int(nativeEndian.Uint16(b[22:24])), - raw: b[:l], - } - for i := 0; i < 16; i++ { - if b[6+i] != 0 { - continue - } - m.Name = string(b[6 : 6+i]) - break - } - return m, nil -} diff --git a/vendor/golang.org/x/net/route/interface_classic.go b/vendor/golang.org/x/net/route/interface_classic.go deleted file mode 100644 index ac4e7a68..00000000 --- a/vendor/golang.org/x/net/route/interface_classic.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly netbsd - -package route - -import "runtime" - -func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < w.bodyOff { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - attrs := uint(nativeEndian.Uint32(b[4:8])) - if attrs&sysRTA_IFP == 0 { - return nil, nil - } - m := &InterfaceMessage{ - Version: int(b[2]), - Type: int(b[3]), - Addrs: make([]Addr, sysRTAX_MAX), - Flags: int(nativeEndian.Uint32(b[8:12])), - Index: int(nativeEndian.Uint16(b[12:14])), - extOff: w.extOff, - raw: b[:l], - } - a, err := parseLinkAddr(b[w.bodyOff:]) - if err != nil { - return nil, err - } - m.Addrs[sysRTAX_IFP] = a - m.Name = a.(*LinkAddr).Name - return m, nil -} - -func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < w.bodyOff { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - m := &InterfaceAddrMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[8:12])), - raw: b[:l], - } - if runtime.GOOS == "netbsd" { - m.Index = int(nativeEndian.Uint16(b[16:18])) - } else { - m.Index = int(nativeEndian.Uint16(b[12:14])) - } - var err error - m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) - if err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/golang.org/x/net/route/interface_freebsd.go b/vendor/golang.org/x/net/route/interface_freebsd.go deleted file mode 100644 index 9f6f50c0..00000000 --- a/vendor/golang.org/x/net/route/interface_freebsd.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package route - -func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) { - var extOff, bodyOff int - if typ == sysNET_RT_IFLISTL { - if len(b) < 20 { - return nil, errMessageTooShort - } - extOff = int(nativeEndian.Uint16(b[18:20])) - bodyOff = int(nativeEndian.Uint16(b[16:18])) - } else { - extOff = w.extOff - bodyOff = w.bodyOff - } - if len(b) < extOff || len(b) < bodyOff { - return nil, errInvalidMessage - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - attrs := uint(nativeEndian.Uint32(b[4:8])) - if attrs&sysRTA_IFP == 0 { - return nil, nil - } - m := &InterfaceMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[8:12])), - Index: int(nativeEndian.Uint16(b[12:14])), - Addrs: make([]Addr, sysRTAX_MAX), - extOff: extOff, - raw: b[:l], - } - a, err := parseLinkAddr(b[bodyOff:]) - if err != nil { - return nil, err - } - m.Addrs[sysRTAX_IFP] = a - m.Name = a.(*LinkAddr).Name - return m, nil -} - -func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) { - var bodyOff int - if typ == sysNET_RT_IFLISTL { - if len(b) < 24 { - return nil, errMessageTooShort - } - bodyOff = int(nativeEndian.Uint16(b[16:18])) - } else { - bodyOff = w.bodyOff - } - if len(b) < bodyOff { - return nil, errInvalidMessage - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - m := &InterfaceAddrMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[8:12])), - Index: int(nativeEndian.Uint16(b[12:14])), - raw: b[:l], - } - var err error - m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:]) - if err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/golang.org/x/net/route/interface_multicast.go b/vendor/golang.org/x/net/route/interface_multicast.go deleted file mode 100644 index 1e99a9cc..00000000 --- a/vendor/golang.org/x/net/route/interface_multicast.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd - -package route - -func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < w.bodyOff { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - m := &InterfaceMulticastAddrMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[8:12])), - Index: int(nativeEndian.Uint16(b[12:14])), - raw: b[:l], - } - var err error - m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) - if err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/golang.org/x/net/route/interface_openbsd.go b/vendor/golang.org/x/net/route/interface_openbsd.go deleted file mode 100644 index e4a143c1..00000000 --- a/vendor/golang.org/x/net/route/interface_openbsd.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package route - -func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < 32 { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - attrs := uint(nativeEndian.Uint32(b[12:16])) - if attrs&sysRTA_IFP == 0 { - return nil, nil - } - m := &InterfaceMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[16:20])), - Index: int(nativeEndian.Uint16(b[6:8])), - Addrs: make([]Addr, sysRTAX_MAX), - raw: b[:l], - } - ll := int(nativeEndian.Uint16(b[4:6])) - if len(b) < ll { - return nil, errInvalidMessage - } - a, err := parseLinkAddr(b[ll:]) - if err != nil { - return nil, err - } - m.Addrs[sysRTAX_IFP] = a - m.Name = a.(*LinkAddr).Name - return m, nil -} - -func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < 24 { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - bodyOff := int(nativeEndian.Uint16(b[4:6])) - if len(b) < bodyOff { - return nil, errInvalidMessage - } - m := &InterfaceAddrMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[12:16])), - Index: int(nativeEndian.Uint16(b[6:8])), - raw: b[:l], - } - var err error - m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:]) - if err != nil { - return nil, err - } - return m, nil -} - -func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < 26 { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - m := &InterfaceAnnounceMessage{ - Version: int(b[2]), - Type: int(b[3]), - Index: int(nativeEndian.Uint16(b[6:8])), - What: int(nativeEndian.Uint16(b[8:10])), - raw: b[:l], - } - for i := 0; i < 16; i++ { - if b[10+i] != 0 { - continue - } - m.Name = string(b[10 : 10+i]) - break - } - return m, nil -} diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go deleted file mode 100644 index 0fa7e09f..00000000 --- a/vendor/golang.org/x/net/route/message.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package route - -// A Message represents a routing message. -type Message interface { - // Sys returns operating system-specific information. - Sys() []Sys -} - -// A Sys reprensents operating system-specific information. -type Sys interface { - // SysType returns a type of operating system-specific - // information. - SysType() SysType -} - -// A SysType represents a type of operating system-specific -// information. -type SysType int - -const ( - SysMetrics SysType = iota - SysStats -) - -// ParseRIB parses b as a routing information base and returns a list -// of routing messages. 
-func ParseRIB(typ RIBType, b []byte) ([]Message, error) { - if !typ.parseable() { - return nil, errUnsupportedMessage - } - var msgs []Message - nmsgs, nskips := 0, 0 - for len(b) > 4 { - nmsgs++ - l := int(nativeEndian.Uint16(b[:2])) - if l == 0 { - return nil, errInvalidMessage - } - if len(b) < l { - return nil, errMessageTooShort - } - if b[2] != sysRTM_VERSION { - b = b[l:] - continue - } - if w, ok := wireFormats[int(b[3])]; !ok { - nskips++ - } else { - m, err := w.parse(typ, b) - if err != nil { - return nil, err - } - if m == nil { - nskips++ - } else { - msgs = append(msgs, m) - } - } - b = b[l:] - } - // We failed to parse any of the messages - version mismatch? - if nmsgs != len(msgs)+nskips { - return nil, errMessageMismatch - } - return msgs, nil -} diff --git a/vendor/golang.org/x/net/route/message_darwin_test.go b/vendor/golang.org/x/net/route/message_darwin_test.go deleted file mode 100644 index 316aa750..00000000 --- a/vendor/golang.org/x/net/route/message_darwin_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package route - -import "testing" - -func TestFetchAndParseRIBOnDarwin(t *testing.T) { - for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} { - var lastErr error - var ms []Message - for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { - rs, err := fetchAndParseRIB(af, typ) - if err != nil { - lastErr = err - continue - } - ms = append(ms, rs...) - } - if len(ms) == 0 && lastErr != nil { - t.Error(typ, lastErr) - continue - } - ss, err := msgs(ms).validate() - if err != nil { - t.Error(typ, err) - continue - } - for _, s := range ss { - t.Log(s) - } - } -} diff --git a/vendor/golang.org/x/net/route/message_freebsd_test.go b/vendor/golang.org/x/net/route/message_freebsd_test.go deleted file mode 100644 index db4b5675..00000000 --- a/vendor/golang.org/x/net/route/message_freebsd_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package route - -import ( - "testing" - "unsafe" -) - -func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { - for _, typ := range []RIBType{sysNET_RT_IFMALIST} { - var lastErr error - var ms []Message - for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { - rs, err := fetchAndParseRIB(af, typ) - if err != nil { - lastErr = err - continue - } - ms = append(ms, rs...) - } - if len(ms) == 0 && lastErr != nil { - t.Error(typ, lastErr) - continue - } - ss, err := msgs(ms).validate() - if err != nil { - t.Error(typ, err) - continue - } - for _, s := range ss { - t.Log(s) - } - } -} - -func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { - if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { - t.Skip("NET_RT_IFLISTL not supported") - } - var p uintptr - if kernelAlign != int(unsafe.Sizeof(p)) { - t.Skip("NET_RT_IFLIST vs. 
NET_RT_IFLISTL doesn't work for 386 emulation on amd64") - } - - var tests = [2]struct { - typ RIBType - b []byte - msgs []Message - ss []string - }{ - {typ: sysNET_RT_IFLIST}, - {typ: sysNET_RT_IFLISTL}, - } - for i := range tests { - var lastErr error - for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { - rs, err := fetchAndParseRIB(af, tests[i].typ) - if err != nil { - lastErr = err - continue - } - tests[i].msgs = append(tests[i].msgs, rs...) - } - if len(tests[i].msgs) == 0 && lastErr != nil { - t.Error(tests[i].typ, lastErr) - continue - } - tests[i].ss, lastErr = msgs(tests[i].msgs).validate() - if lastErr != nil { - t.Error(tests[i].typ, lastErr) - continue - } - for _, s := range tests[i].ss { - t.Log(s) - } - } - for i := len(tests) - 1; i > 0; i-- { - if len(tests[i].ss) != len(tests[i-1].ss) { - t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss) - continue - } - for j, s1 := range tests[i].ss { - s0 := tests[i-1].ss[j] - if s1 != s0 { - t.Errorf("got %s; want %s", s1, s0) - } - } - } -} diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go deleted file mode 100644 index e848dabf..00000000 --- a/vendor/golang.org/x/net/route/message_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package route - -import ( - "os" - "syscall" - "testing" - "time" -) - -func TestFetchAndParseRIB(t *testing.T) { - for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} { - var lastErr error - var ms []Message - for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { - rs, err := fetchAndParseRIB(af, typ) - if err != nil { - lastErr = err - continue - } - ms = append(ms, rs...) - } - if len(ms) == 0 && lastErr != nil { - t.Error(typ, lastErr) - continue - } - ss, err := msgs(ms).validate() - if err != nil { - t.Error(typ, err) - continue - } - for _, s := range ss { - t.Log(typ, s) - } - } -} - -var ( - rtmonSock int - rtmonErr error -) - -func init() { - // We need to keep rtmonSock alive to avoid treading on - // recycled socket descriptors. - rtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) -} - -// TestMonitorAndParseRIB leaks a worker goroutine and a socket -// descriptor but that's intentional. -func TestMonitorAndParseRIB(t *testing.T) { - if testing.Short() || os.Getuid() != 0 { - t.Skip("must be root") - } - - if rtmonErr != nil { - t.Fatal(rtmonErr) - } - - // We suppose that using an IPv4 link-local address and the - // dot1Q ID for Token Ring and FDDI doesn't harm anyone. - pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} - if err := pv.configure(1002); err != nil { - t.Skip(err) - } - if err := pv.setup(); err != nil { - t.Skip(err) - } - pv.teardown() - - go func() { - b := make([]byte, os.Getpagesize()) - for { - // There's no easy way to unblock this read - // call because the routing message exchange - // over routing socket is a connectionless - // message-oriented protocol, no control plane - // for signaling connectivity, and we cannot - // use the net package of standard library due - // to the lack of support for routing socket - // and circular dependency. 
- n, err := syscall.Read(rtmonSock, b) - if err != nil { - return - } - ms, err := ParseRIB(0, b[:n]) - if err != nil { - t.Error(err) - return - } - ss, err := msgs(ms).validate() - if err != nil { - t.Error(err) - return - } - for _, s := range ss { - t.Log(s) - } - } - }() - - for _, vid := range []int{1002, 1003, 1004, 1005} { - pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} - if err := pv.configure(vid); err != nil { - t.Fatal(err) - } - if err := pv.setup(); err != nil { - t.Fatal(err) - } - time.Sleep(200 * time.Millisecond) - if err := pv.teardown(); err != nil { - t.Fatal(err) - } - time.Sleep(200 * time.Millisecond) - } -} - -func TestParseRIBWithFuzz(t *testing.T) { - for _, fuzz := range []string{ - "0\x00\x05\x050000000000000000" + - "00000000000000000000" + - "00000000000000000000" + - "00000000000000000000" + - "0000000000000\x02000000" + - "00000000", - "\x02\x00\x05\f0000000000000000" + - "0\x0200000000000000", - "\x02\x00\x05\x100000000000000\x1200" + - "0\x00\xff\x00", - "\x02\x00\x05\f0000000000000000" + - "0\x12000\x00\x02\x0000", - "\x00\x00\x00\x01\x00", - "00000", - } { - for typ := RIBType(0); typ < 256; typ++ { - ParseRIB(typ, []byte(fuzz)) - } - } -} - -func TestRouteMessage(t *testing.T) { - s, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) - if err != nil { - t.Fatal(err) - } - defer syscall.Close(s) - - var ms []RouteMessage - for _, af := range []int{sysAF_INET, sysAF_INET6} { - if _, err := fetchAndParseRIB(af, sysNET_RT_DUMP); err != nil { - t.Log(err) - continue - } - switch af { - case sysAF_INET: - ms = append(ms, []RouteMessage{ - { - Type: sysRTM_GET, - Addrs: []Addr{ - &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, - nil, - nil, - nil, - &LinkAddr{}, - &Inet4Addr{}, - nil, - &Inet4Addr{}, - }, - }, - { - Type: sysRTM_GET, - Addrs: []Addr{ - &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, - }, - }, - }...) - case sysAF_INET6: - ms = append(ms, []RouteMessage{ - { - Type: sysRTM_GET, - Addrs: []Addr{ - &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, - nil, - nil, - nil, - &LinkAddr{}, - &Inet6Addr{}, - nil, - &Inet6Addr{}, - }, - }, - { - Type: sysRTM_GET, - Addrs: []Addr{ - &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, - }, - }, - }...) - } - } - for i, m := range ms { - m.ID = uintptr(os.Getpid()) - m.Seq = i + 1 - wb, err := m.Marshal() - if err != nil { - t.Fatalf("%v: %v", m, err) - } - if _, err := syscall.Write(s, wb); err != nil { - t.Fatalf("%v: %v", m, err) - } - rb := make([]byte, os.Getpagesize()) - n, err := syscall.Read(s, rb) - if err != nil { - t.Fatalf("%v: %v", m, err) - } - rms, err := ParseRIB(0, rb[:n]) - if err != nil { - t.Fatalf("%v: %v", m, err) - } - for _, rm := range rms { - err := rm.(*RouteMessage).Err - if err != nil { - t.Errorf("%v: %v", m, err) - } - } - ss, err := msgs(rms).validate() - if err != nil { - t.Fatalf("%v: %v", m, err) - } - for _, s := range ss { - t.Log(s) - } - } -} diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go deleted file mode 100644 index 081da0d5..00000000 --- a/vendor/golang.org/x/net/route/route.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -// Package route provides basic functions for the manipulation of -// packet routing facilities on BSD variants. 
-// -// The package supports any version of Darwin, any version of -// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and -// OpenBSD 5.6 and above. -package route - -import ( - "errors" - "os" - "syscall" -) - -var ( - errUnsupportedMessage = errors.New("unsupported message") - errMessageMismatch = errors.New("message mismatch") - errMessageTooShort = errors.New("message too short") - errInvalidMessage = errors.New("invalid message") - errInvalidAddr = errors.New("invalid address") - errShortBuffer = errors.New("short buffer") -) - -// A RouteMessage represents a message conveying an address prefix, a -// nexthop address and an output interface. -// -// Unlike other messages, this message can be used to query adjacency -// information for the given address prefix, to add a new route, and -// to delete or modify the existing route from the routing information -// base inside the kernel by writing and reading route messages on a -// routing socket. -// -// For the manipulation of routing information, the route message must -// contain appropriate fields that include: -// -// Version = -// Type = -// Flags = -// Index = -// ID = -// Seq = -// Addrs = -// -// The Type field specifies a type of manipulation, the Flags field -// specifies a class of target information and the Addrs field -// specifies target information like the following: -// -// route.RouteMessage{ -// Version: RTM_VERSION, -// Type: RTM_GET, -// Flags: RTF_UP | RTF_HOST, -// ID: uintptr(os.Getpid()), -// Seq: 1, -// Addrs: []route.Addrs{ -// RTAX_DST: &route.Inet4Addr{ ... }, -// RTAX_IFP: &route.LinkAddr{ ... }, -// RTAX_BRD: &route.Inet4Addr{ ... }, -// }, -// } -// -// The values for the above fields depend on the implementation of -// each operating system. -// -// The Err field on a response message contains an error value on the -// requested operation. If non-nil, the requested operation is failed. -type RouteMessage struct { - Version int // message version - Type int // message type - Flags int // route flags - Index int // interface index when atatched - ID uintptr // sender's identifier; usually process ID - Seq int // sequence number - Err error // error on requested operation - Addrs []Addr // addresses - - extOff int // offset of header extension - raw []byte // raw message -} - -// Marshal returns the binary encoding of m. -func (m *RouteMessage) Marshal() ([]byte, error) { - return m.marshal() -} - -// A RIBType reprensents a type of routing information base. -type RIBType int - -const ( - RIBTypeRoute RIBType = syscall.NET_RT_DUMP - RIBTypeInterface RIBType = syscall.NET_RT_IFLIST -) - -// FetchRIB fetches a routing information base from the operating -// system. -// -// The provided af must be an address family. -// -// The provided arg must be a RIBType-specific argument. -// When RIBType is related to routes, arg might be a set of route -// flags. When RIBType is related to network interfaces, arg might be -// an interface index or a set of interface flags. In most cases, zero -// means a wildcard. 
-func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) { - mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)} - n := uintptr(0) - if err := sysctl(mib[:], nil, &n, nil, 0); err != nil { - return nil, os.NewSyscallError("sysctl", err) - } - if n == 0 { - return nil, nil - } - b := make([]byte, n) - if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil { - return nil, os.NewSyscallError("sysctl", err) - } - return b[:n], nil -} diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go deleted file mode 100644 index 61b2bb4a..00000000 --- a/vendor/golang.org/x/net/route/route_classic.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd - -package route - -import "syscall" - -func (m *RouteMessage) marshal() ([]byte, error) { - w, ok := wireFormats[m.Type] - if !ok { - return nil, errUnsupportedMessage - } - l := w.bodyOff + addrsSpace(m.Addrs) - b := make([]byte, l) - nativeEndian.PutUint16(b[:2], uint16(l)) - if m.Version == 0 { - b[2] = sysRTM_VERSION - } else { - b[2] = byte(m.Version) - } - b[3] = byte(m.Type) - nativeEndian.PutUint32(b[8:12], uint32(m.Flags)) - nativeEndian.PutUint16(b[4:6], uint16(m.Index)) - nativeEndian.PutUint32(b[16:20], uint32(m.ID)) - nativeEndian.PutUint32(b[20:24], uint32(m.Seq)) - attrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs) - if err != nil { - return nil, err - } - if attrs > 0 { - nativeEndian.PutUint32(b[12:16], uint32(attrs)) - } - return b, nil -} - -func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { - if len(b) < w.bodyOff { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - m := &RouteMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[8:12])), - Index: int(nativeEndian.Uint16(b[4:6])), - ID: uintptr(nativeEndian.Uint32(b[16:20])), - Seq: int(nativeEndian.Uint32(b[20:24])), - extOff: w.extOff, - raw: b[:l], - } - errno := syscall.Errno(nativeEndian.Uint32(b[28:32])) - if errno != 0 { - m.Err = errno - } - var err error - m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) - if err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go deleted file mode 100644 index daf2e90c..00000000 --- a/vendor/golang.org/x/net/route/route_openbsd.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
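
The FetchRIB and ParseRIB functions deleted above remain the public entry points of golang.org/x/net/route; after this patch the package is simply resolved through go.mod instead of the vendor tree. As a rough, illustrative sketch only (not part of the patch, and limited to the BSD platforms named in the build tags above, with the usual syscall constants assumed), a consumer fetches the raw routing information base and parses it into typed messages like this:

    package main

    import (
    	"fmt"
    	"log"
    	"syscall"

    	"golang.org/x/net/route"
    )

    func main() {
    	// Fetch the interface-list RIB for all address families; arg 0 acts as a wildcard.
    	b, err := route.FetchRIB(syscall.AF_UNSPEC, route.RIBTypeInterface, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Parse the raw base into typed routing messages.
    	msgs, err := route.ParseRIB(route.RIBTypeInterface, b)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, m := range msgs {
    		// Interface messages carry the link name and its addresses.
    		if ifm, ok := m.(*route.InterfaceMessage); ok {
    			fmt.Println(ifm.Name, ifm.Addrs)
    		}
    	}
    }
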
- -package route - -import "syscall" - -func (m *RouteMessage) marshal() ([]byte, error) { - l := sizeofRtMsghdr + addrsSpace(m.Addrs) - b := make([]byte, l) - nativeEndian.PutUint16(b[:2], uint16(l)) - if m.Version == 0 { - b[2] = sysRTM_VERSION - } else { - b[2] = byte(m.Version) - } - b[3] = byte(m.Type) - nativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr)) - nativeEndian.PutUint32(b[16:20], uint32(m.Flags)) - nativeEndian.PutUint16(b[6:8], uint16(m.Index)) - nativeEndian.PutUint32(b[24:28], uint32(m.ID)) - nativeEndian.PutUint32(b[28:32], uint32(m.Seq)) - attrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs) - if err != nil { - return nil, err - } - if attrs > 0 { - nativeEndian.PutUint32(b[12:16], uint32(attrs)) - } - return b, nil -} - -func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { - if len(b) < sizeofRtMsghdr { - return nil, errMessageTooShort - } - l := int(nativeEndian.Uint16(b[:2])) - if len(b) < l { - return nil, errInvalidMessage - } - m := &RouteMessage{ - Version: int(b[2]), - Type: int(b[3]), - Flags: int(nativeEndian.Uint32(b[16:20])), - Index: int(nativeEndian.Uint16(b[6:8])), - ID: uintptr(nativeEndian.Uint32(b[24:28])), - Seq: int(nativeEndian.Uint32(b[28:32])), - raw: b[:l], - } - ll := int(nativeEndian.Uint16(b[4:6])) - if len(b) < ll { - return nil, errInvalidMessage - } - errno := syscall.Errno(nativeEndian.Uint32(b[32:36])) - if errno != 0 { - m.Err = errno - } - as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) - if err != nil { - return nil, err - } - m.Addrs = as - return m, nil -} diff --git a/vendor/golang.org/x/net/route/route_test.go b/vendor/golang.org/x/net/route/route_test.go deleted file mode 100644 index 61bd1745..00000000 --- a/vendor/golang.org/x/net/route/route_test.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin dragonfly freebsd netbsd openbsd - -package route - -import ( - "fmt" - "os/exec" - "runtime" - "time" -) - -func (m *RouteMessage) String() string { - return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16]))) -} - -func (m *InterfaceMessage) String() string { - var attrs addrAttrs - if runtime.GOOS == "openbsd" { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) - } else { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) - } - return fmt.Sprintf("%s", attrs) -} - -func (m *InterfaceAddrMessage) String() string { - var attrs addrAttrs - if runtime.GOOS == "openbsd" { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) - } else { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) - } - return fmt.Sprintf("%s", attrs) -} - -func (m *InterfaceMulticastAddrMessage) String() string { - return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8]))) -} - -func (m *InterfaceAnnounceMessage) String() string { - what := "" - switch m.What { - case 0: - what = "arrival" - case 1: - what = "departure" - } - return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what) -} - -func (m *InterfaceMetrics) String() string { - return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU) -} - -func (m *RouteMetrics) String() string { - return fmt.Sprintf("(pmtu=%d)", m.PathMTU) -} - -type addrAttrs uint - -var addrAttrNames = [...]string{ - "dst", - "gateway", - "netmask", - "genmask", - "ifp", - "ifa", - "author", - "brd", - "df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd - "df:mpls2-o:srcmask", // mpls2 for dragonfly, srcmask for openbsd - "df:mpls3-o:label", // mpls3 for dragonfly, label for openbsd - "o:bfd", // bfd for openbsd - "o:dns", // dns for openbsd - "o:static", // static for openbsd - "o:search", // search for openbsd -} - -func (attrs addrAttrs) String() string { - var s string - for i, name := range addrAttrNames { - if attrs&(1<" - } - return s -} - -type msgs []Message - -func (ms msgs) validate() ([]string, error) { - var ss []string - for _, m := range ms { - switch m := m.(type) { - case *RouteMessage: - if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil { - return nil, err - } - sys := m.Sys() - if sys == nil { - return nil, fmt.Errorf("no sys for %s", m.String()) - } - ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) - case *InterfaceMessage: - var attrs addrAttrs - if runtime.GOOS == "openbsd" { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) - } else { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) - } - if err := addrs(m.Addrs).match(attrs); err != nil { - return nil, err - } - sys := m.Sys() - if sys == nil { - return nil, fmt.Errorf("no sys for %s", m.String()) - } - ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) - case *InterfaceAddrMessage: - var attrs addrAttrs - if runtime.GOOS == "openbsd" { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) - } else { - attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) - } - if err := addrs(m.Addrs).match(attrs); err != nil { - return nil, err - } - ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) - case *InterfaceMulticastAddrMessage: - if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil { - return nil, err - } - ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) - case *InterfaceAnnounceMessage: - ss = append(ss, m.String()) - default: - ss = append(ss, fmt.Sprintf("%+v", m)) - } - } - return ss, nil -} - 
-type syss []Sys - -func (sys syss) String() string { - var s string - for _, sy := range sys { - switch sy := sy.(type) { - case *InterfaceMetrics: - if len(s) > 0 { - s += " " - } - s += sy.String() - case *RouteMetrics: - if len(s) > 0 { - s += " " - } - s += sy.String() - } - } - return s -} - -type addrFamily int - -func (af addrFamily) String() string { - switch af { - case sysAF_UNSPEC: - return "unspec" - case sysAF_LINK: - return "link" - case sysAF_INET: - return "inet4" - case sysAF_INET6: - return "inet6" - default: - return fmt.Sprintf("%d", af) - } -} - -const hexDigit = "0123456789abcdef" - -type llAddr []byte - -func (a llAddr) String() string { - if len(a) == 0 { - return "" - } - buf := make([]byte, 0, len(a)*3-1) - for i, b := range a { - if i > 0 { - buf = append(buf, ':') - } - buf = append(buf, hexDigit[b>>4]) - buf = append(buf, hexDigit[b&0xF]) - } - return string(buf) -} - -type ipAddr []byte - -func (a ipAddr) String() string { - if len(a) == 0 { - return "" - } - if len(a) == 4 { - return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) - } - if len(a) == 16 { - return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) - } - s := make([]byte, len(a)*2) - for i, tn := range a { - s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] - } - return string(s) -} - -func (a *LinkAddr) String() string { - name := a.Name - if name == "" { - name = "" - } - lla := llAddr(a.Addr).String() - if lla == "" { - lla = "" - } - return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla) -} - -func (a *Inet4Addr) String() string { - return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:])) -} - -func (a *Inet6Addr) String() string { - return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID) -} - -func (a *DefaultAddr) String() string { - return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String()) -} - -type addrs []Addr - -func (as addrs) String() string { - var s string - for _, a := range as { - if a == nil { - continue - } - if len(s) > 0 { - s += " " - } - switch a := a.(type) { - case *LinkAddr: - s += a.String() - case *Inet4Addr: - s += a.String() - case *Inet6Addr: - s += a.String() - case *DefaultAddr: - s += a.String() - } - } - if s == "" { - return "" - } - return s -} - -func (as addrs) match(attrs addrAttrs) error { - var ts addrAttrs - af := sysAF_UNSPEC - for i := range as { - if as[i] != nil { - ts |= 1 << uint(i) - } - switch as[i].(type) { - case *Inet4Addr: - if af == sysAF_UNSPEC { - af = sysAF_INET - } - if af != sysAF_INET { - return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) - } - case *Inet6Addr: - if af == sysAF_UNSPEC { - af = sysAF_INET6 - } - if af != sysAF_INET6 { - return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) - } - } - } - if ts != attrs && ts > attrs { - return fmt.Errorf("%v not included in %v", ts, attrs) - } - return nil -} - -func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) { - var err error - var b []byte - for i := 0; i < 3; i++ { - if b, err = FetchRIB(af, typ, 0); err != nil { - time.Sleep(10 * time.Millisecond) - continue - } - break - } - if err != nil { - return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) - } - ms, err := ParseRIB(typ, b) - if err != nil { - return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) - } - return ms, nil -} - -// propVirtual is a 
proprietary virtual network interface. -type propVirtual struct { - name string - addr, mask string - setupCmds []*exec.Cmd - teardownCmds []*exec.Cmd -} - -func (pv *propVirtual) setup() error { - for _, cmd := range pv.setupCmds { - if err := cmd.Run(); err != nil { - pv.teardown() - return err - } - } - return nil -} - -func (pv *propVirtual) teardown() error { - for _, cmd := range pv.teardownCmds { - if err := cmd.Run(); err != nil { - return err - } - } - return nil -} - -func (pv *propVirtual) configure(suffix int) error { - if runtime.GOOS == "openbsd" { - pv.name = fmt.Sprintf("vether%d", suffix) - } else { - pv.name = fmt.Sprintf("vlan%d", suffix) - } - xname, err := exec.LookPath("ifconfig") - if err != nil { - return err - } - pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ - Path: xname, - Args: []string{"ifconfig", pv.name, "create"}, - }) - if runtime.GOOS == "netbsd" { - // NetBSD requires an underlying dot1Q-capable network - // interface. - pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ - Path: xname, - Args: []string{"ifconfig", pv.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"}, - }) - } - pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ - Path: xname, - Args: []string{"ifconfig", pv.name, "inet", pv.addr, "netmask", pv.mask}, - }) - pv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{ - Path: xname, - Args: []string{"ifconfig", pv.name, "destroy"}, - }) - return nil -} diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go deleted file mode 100644 index 3d0ee9b1..00000000 --- a/vendor/golang.org/x/net/route/sys.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package route - -import "unsafe" - -var ( - nativeEndian binaryByteOrder - kernelAlign int - wireFormats map[int]*wireFormat -) - -func init() { - i := uint32(1) - b := (*[4]byte)(unsafe.Pointer(&i)) - if b[0] == 1 { - nativeEndian = littleEndian - } else { - nativeEndian = bigEndian - } - kernelAlign, wireFormats = probeRoutingStack() -} - -func roundup(l int) int { - if l == 0 { - return kernelAlign - } - return (l + kernelAlign - 1) & ^(kernelAlign - 1) -} - -type wireFormat struct { - extOff int // offset of header extension - bodyOff int // offset of message body - parse func(RIBType, []byte) (Message, error) -} diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go deleted file mode 100644 index d2daf5c0..00000000 --- a/vendor/golang.org/x/net/route/sys_darwin.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package route - -func (typ RIBType) parseable() bool { - switch typ { - case sysNET_RT_STAT, sysNET_RT_TRASH: - return false - default: - return true - } -} - -// RouteMetrics represents route metrics. -type RouteMetrics struct { - PathMTU int // path maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *RouteMessage) Sys() []Sys { - return []Sys{ - &RouteMetrics{ - PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), - }, - } -} - -// InterfaceMetrics represents interface metrics. 
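
The sys.go file removed above wires up the parser tables at init time: it detects native byte order by inspecting the in-memory layout of a uint32 and rounds socket-address lengths up to the kernel alignment. A standalone sketch of those two helpers, reproduced here only for illustration since they are unexported in the package:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // littleEndian reports whether the host stores the least significant
    // byte first, mirroring the probe in sys.go's init.
    func littleEndian() bool {
    	i := uint32(1)
    	b := (*[4]byte)(unsafe.Pointer(&i))
    	return b[0] == 1
    }

    // roundup rounds l up to the next multiple of align (a power of two),
    // as the route package does with the probed kernel alignment.
    func roundup(l, align int) int {
    	if l == 0 {
    		return align
    	}
    	return (l + align - 1) &^ (align - 1)
    }

    func main() {
    	fmt.Println("little-endian:", littleEndian())
    	fmt.Println("roundup(5, 4):", roundup(5, 4)) // prints 8
    }
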
-type InterfaceMetrics struct { - Type int // interface type - MTU int // maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *InterfaceMessage) Sys() []Sys { - return []Sys{ - &InterfaceMetrics{ - Type: int(m.raw[m.extOff]), - MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), - }, - } -} - -func probeRoutingStack() (int, map[int]*wireFormat) { - rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} - rtm.parse = rtm.parseRouteMessage - rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} - rtm2.parse = rtm2.parseRouteMessage - ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} - ifm.parse = ifm.parseInterfaceMessage - ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} - ifm2.parse = ifm2.parseInterfaceMessage - ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} - ifam.parse = ifam.parseInterfaceAddrMessage - ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} - ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage - ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} - ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage - // Darwin kernels require 32-bit aligned access to routing facilities. - return 4, map[int]*wireFormat{ - sysRTM_ADD: rtm, - sysRTM_DELETE: rtm, - sysRTM_CHANGE: rtm, - sysRTM_GET: rtm, - sysRTM_LOSING: rtm, - sysRTM_REDIRECT: rtm, - sysRTM_MISS: rtm, - sysRTM_LOCK: rtm, - sysRTM_RESOLVE: rtm, - sysRTM_NEWADDR: ifam, - sysRTM_DELADDR: ifam, - sysRTM_IFINFO: ifm, - sysRTM_NEWMADDR: ifmam, - sysRTM_DELMADDR: ifmam, - sysRTM_IFINFO2: ifm2, - sysRTM_NEWMADDR2: ifmam2, - sysRTM_GET2: rtm2, - } -} diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go deleted file mode 100644 index 0c14bc2b..00000000 --- a/vendor/golang.org/x/net/route/sys_dragonfly.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package route - -import "unsafe" - -func (typ RIBType) parseable() bool { return true } - -// RouteMetrics represents route metrics. -type RouteMetrics struct { - PathMTU int // path maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *RouteMessage) Sys() []Sys { - return []Sys{ - &RouteMetrics{ - PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), - }, - } -} - -// InterfaceMetrics represents interface metrics. -type InterfaceMetrics struct { - Type int // interface type - MTU int // maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. 
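
Each platform file above attaches operating-system extras to parsed messages through the Sys method, with RouteMetrics and InterfaceMetrics as the exported carrier types. Extending the earlier sketch (again an assumption-laden illustration on a BSD host, not part of the vendored source), a caller that only cares about interface MTUs might type-switch on those metrics:

    package main

    import (
    	"fmt"
    	"log"
    	"syscall"

    	"golang.org/x/net/route"
    )

    func main() {
    	b, err := route.FetchRIB(syscall.AF_UNSPEC, route.RIBTypeInterface, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	msgs, err := route.ParseRIB(route.RIBTypeInterface, b)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, m := range msgs {
    		ifm, ok := m.(*route.InterfaceMessage)
    		if !ok {
    			continue
    		}
    		// Sys returns platform-specific information; InterfaceMetrics carries type and MTU.
    		for _, s := range ifm.Sys() {
    			if imx, ok := s.(*route.InterfaceMetrics); ok {
    				fmt.Printf("%s: mtu=%d\n", ifm.Name, imx.MTU)
    			}
    		}
    	}
    }
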
-func (m *InterfaceMessage) Sys() []Sys { - return []Sys{ - &InterfaceMetrics{ - Type: int(m.raw[m.extOff]), - MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), - }, - } -} - -func probeRoutingStack() (int, map[int]*wireFormat) { - var p uintptr - rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4} - rtm.parse = rtm.parseRouteMessage - ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4} - ifm.parse = ifm.parseInterfaceMessage - ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4} - ifam.parse = ifam.parseInterfaceAddrMessage - ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4} - ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage - ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4} - ifanm.parse = ifanm.parseInterfaceAnnounceMessage - return int(unsafe.Sizeof(p)), map[int]*wireFormat{ - sysRTM_ADD: rtm, - sysRTM_DELETE: rtm, - sysRTM_CHANGE: rtm, - sysRTM_GET: rtm, - sysRTM_LOSING: rtm, - sysRTM_REDIRECT: rtm, - sysRTM_MISS: rtm, - sysRTM_LOCK: rtm, - sysRTM_RESOLVE: rtm, - sysRTM_NEWADDR: ifam, - sysRTM_DELADDR: ifam, - sysRTM_IFINFO: ifm, - sysRTM_NEWMADDR: ifmam, - sysRTM_DELMADDR: ifmam, - sysRTM_IFANNOUNCE: ifanm, - } -} diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go b/vendor/golang.org/x/net/route/sys_freebsd.go deleted file mode 100644 index 89ba1c4e..00000000 --- a/vendor/golang.org/x/net/route/sys_freebsd.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package route - -import ( - "syscall" - "unsafe" -) - -func (typ RIBType) parseable() bool { return true } - -// RouteMetrics represents route metrics. -type RouteMetrics struct { - PathMTU int // path maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *RouteMessage) Sys() []Sys { - if kernelAlign == 8 { - return []Sys{ - &RouteMetrics{ - PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), - }, - } - } - return []Sys{ - &RouteMetrics{ - PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), - }, - } -} - -// InterfaceMetrics represents interface metrics. -type InterfaceMetrics struct { - Type int // interface type - MTU int // maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *InterfaceMessage) Sys() []Sys { - return []Sys{ - &InterfaceMetrics{ - Type: int(m.raw[m.extOff]), - MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), - }, - } -} - -func probeRoutingStack() (int, map[int]*wireFormat) { - var p uintptr - wordSize := int(unsafe.Sizeof(p)) - align := int(unsafe.Sizeof(p)) - // In the case of kern.supported_archs="amd64 i386", we need - // to know the underlying kernel's architecture because the - // alignment for routing facilities are set at the build time - // of the kernel. 
- conf, _ := syscall.Sysctl("kern.conftxt") - for i, j := 0, 0; j < len(conf); j++ { - if conf[j] != '\n' { - continue - } - s := conf[i:j] - i = j + 1 - if len(s) > len("machine") && s[:len("machine")] == "machine" { - s = s[len("machine"):] - for k := 0; k < len(s); k++ { - if s[k] == ' ' || s[k] == '\t' { - s = s[1:] - } - break - } - if s == "amd64" { - align = 8 - } - break - } - } - var rtm, ifm, ifam, ifmam, ifanm *wireFormat - if align != wordSize { // 386 emulation on amd64 - rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} - ifm = &wireFormat{extOff: 16} - ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} - ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu} - ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu} - } else { - rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10} - ifm = &wireFormat{extOff: 16} - ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10} - ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10} - ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10} - } - rel, _ := syscall.SysctlUint32("kern.osreldate") - switch { - case rel < 800000: - if align != wordSize { // 386 emulation on amd64 - ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu - } else { - ifm.bodyOff = sizeofIfMsghdrFreeBSD7 - } - case 800000 <= rel && rel < 900000: - if align != wordSize { // 386 emulation on amd64 - ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu - } else { - ifm.bodyOff = sizeofIfMsghdrFreeBSD8 - } - case 900000 <= rel && rel < 1000000: - if align != wordSize { // 386 emulation on amd64 - ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu - } else { - ifm.bodyOff = sizeofIfMsghdrFreeBSD9 - } - case 1000000 <= rel && rel < 1100000: - if align != wordSize { // 386 emulation on amd64 - ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu - } else { - ifm.bodyOff = sizeofIfMsghdrFreeBSD10 - } - default: - if align != wordSize { // 386 emulation on amd64 - ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu - } else { - ifm.bodyOff = sizeofIfMsghdrFreeBSD11 - } - } - rtm.parse = rtm.parseRouteMessage - ifm.parse = ifm.parseInterfaceMessage - ifam.parse = ifam.parseInterfaceAddrMessage - ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage - ifanm.parse = ifanm.parseInterfaceAnnounceMessage - return align, map[int]*wireFormat{ - sysRTM_ADD: rtm, - sysRTM_DELETE: rtm, - sysRTM_CHANGE: rtm, - sysRTM_GET: rtm, - sysRTM_LOSING: rtm, - sysRTM_REDIRECT: rtm, - sysRTM_MISS: rtm, - sysRTM_LOCK: rtm, - sysRTM_RESOLVE: rtm, - sysRTM_NEWADDR: ifam, - sysRTM_DELADDR: ifam, - sysRTM_IFINFO: ifm, - sysRTM_NEWMADDR: ifmam, - sysRTM_DELMADDR: ifmam, - sysRTM_IFANNOUNCE: ifanm, - } -} diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go deleted file mode 100644 index 02f71d54..00000000 --- a/vendor/golang.org/x/net/route/sys_netbsd.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package route - -func (typ RIBType) parseable() bool { return true } - -// RouteMetrics represents route metrics. 
-type RouteMetrics struct { - PathMTU int // path maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *RouteMessage) Sys() []Sys { - return []Sys{ - &RouteMetrics{ - PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), - }, - } -} - -// RouteMetrics represents route metrics. -type InterfaceMetrics struct { - Type int // interface type - MTU int // maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *InterfaceMessage) Sys() []Sys { - return []Sys{ - &InterfaceMetrics{ - Type: int(m.raw[m.extOff]), - MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), - }, - } -} - -func probeRoutingStack() (int, map[int]*wireFormat) { - rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7} - rtm.parse = rtm.parseRouteMessage - ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7} - ifm.parse = ifm.parseInterfaceMessage - ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7} - ifam.parse = ifam.parseInterfaceAddrMessage - ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7} - ifanm.parse = ifanm.parseInterfaceAnnounceMessage - // NetBSD 6 and above kernels require 64-bit aligned access to - // routing facilities. - return 8, map[int]*wireFormat{ - sysRTM_ADD: rtm, - sysRTM_DELETE: rtm, - sysRTM_CHANGE: rtm, - sysRTM_GET: rtm, - sysRTM_LOSING: rtm, - sysRTM_REDIRECT: rtm, - sysRTM_MISS: rtm, - sysRTM_LOCK: rtm, - sysRTM_RESOLVE: rtm, - sysRTM_NEWADDR: ifam, - sysRTM_DELADDR: ifam, - sysRTM_IFANNOUNCE: ifanm, - sysRTM_IFINFO: ifm, - } -} diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go deleted file mode 100644 index c5674e83..00000000 --- a/vendor/golang.org/x/net/route/sys_openbsd.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package route - -import "unsafe" - -func (typ RIBType) parseable() bool { - switch typ { - case sysNET_RT_STATS, sysNET_RT_TABLE: - return false - default: - return true - } -} - -// RouteMetrics represents route metrics. -type RouteMetrics struct { - PathMTU int // path maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. -func (m *RouteMessage) Sys() []Sys { - return []Sys{ - &RouteMetrics{ - PathMTU: int(nativeEndian.Uint32(m.raw[60:64])), - }, - } -} - -// InterfaceMetrics represents interface metrics. -type InterfaceMetrics struct { - Type int // interface type - MTU int // maximum transmission unit -} - -// SysType implements the SysType method of Sys interface. -func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } - -// Sys implements the Sys method of Message interface. 
-func (m *InterfaceMessage) Sys() []Sys { - return []Sys{ - &InterfaceMetrics{ - Type: int(m.raw[24]), - MTU: int(nativeEndian.Uint32(m.raw[28:32])), - }, - } -} - -func probeRoutingStack() (int, map[int]*wireFormat) { - var p uintptr - rtm := &wireFormat{extOff: -1, bodyOff: -1} - rtm.parse = rtm.parseRouteMessage - ifm := &wireFormat{extOff: -1, bodyOff: -1} - ifm.parse = ifm.parseInterfaceMessage - ifam := &wireFormat{extOff: -1, bodyOff: -1} - ifam.parse = ifam.parseInterfaceAddrMessage - ifanm := &wireFormat{extOff: -1, bodyOff: -1} - ifanm.parse = ifanm.parseInterfaceAnnounceMessage - return int(unsafe.Sizeof(p)), map[int]*wireFormat{ - sysRTM_ADD: rtm, - sysRTM_DELETE: rtm, - sysRTM_CHANGE: rtm, - sysRTM_GET: rtm, - sysRTM_LOSING: rtm, - sysRTM_REDIRECT: rtm, - sysRTM_MISS: rtm, - sysRTM_LOCK: rtm, - sysRTM_RESOLVE: rtm, - sysRTM_NEWADDR: ifam, - sysRTM_DELADDR: ifam, - sysRTM_IFINFO: ifm, - sysRTM_IFANNOUNCE: ifanm, - sysRTM_DESYNC: rtm, - } -} diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go deleted file mode 100644 index c211188b..00000000 --- a/vendor/golang.org/x/net/route/syscall.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package route - -import ( - "syscall" - "unsafe" -) - -var zero uintptr - -func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { - var p unsafe.Pointer - if len(mib) > 0 { - p = unsafe.Pointer(&mib[0]) - } else { - p = unsafe.Pointer(&zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if errno != 0 { - return error(errno) - } - return nil -} diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go deleted file mode 100644 index 4e2e1ab0..00000000 --- a/vendor/golang.org/x/net/route/zsys_darwin.go +++ /dev/null @@ -1,99 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_darwin.go - -package route - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_ROUTE = 0x11 - sysAF_LINK = 0x12 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 - - sysNET_RT_DUMP = 0x1 - sysNET_RT_FLAGS = 0x2 - sysNET_RT_IFLIST = 0x3 - sysNET_RT_STAT = 0x4 - sysNET_RT_TRASH = 0x5 - sysNET_RT_IFLIST2 = 0x6 - sysNET_RT_DUMP2 = 0x7 - sysNET_RT_MAXID = 0xa -) - -const ( - sysCTL_MAXNAME = 0xc - - sysCTL_UNSPEC = 0x0 - sysCTL_KERN = 0x1 - sysCTL_VM = 0x2 - sysCTL_VFS = 0x3 - sysCTL_NET = 0x4 - sysCTL_DEBUG = 0x5 - sysCTL_HW = 0x6 - sysCTL_MACHDEP = 0x7 - sysCTL_USER = 0x8 - sysCTL_MAXID = 0x9 -) - -const ( - sysRTM_VERSION = 0x5 - - sysRTM_ADD = 0x1 - sysRTM_DELETE = 0x2 - sysRTM_CHANGE = 0x3 - sysRTM_GET = 0x4 - sysRTM_LOSING = 0x5 - sysRTM_REDIRECT = 0x6 - sysRTM_MISS = 0x7 - sysRTM_LOCK = 0x8 - sysRTM_OLDADD = 0x9 - sysRTM_OLDDEL = 0xa - sysRTM_RESOLVE = 0xb - sysRTM_NEWADDR = 0xc - sysRTM_DELADDR = 0xd - sysRTM_IFINFO = 0xe - sysRTM_NEWMADDR = 0xf - sysRTM_DELMADDR = 0x10 - sysRTM_IFINFO2 = 0x12 - sysRTM_NEWMADDR2 = 0x13 - sysRTM_GET2 = 0x14 - - sysRTA_DST = 0x1 - sysRTA_GATEWAY = 0x2 - sysRTA_NETMASK = 0x4 - sysRTA_GENMASK = 0x8 - sysRTA_IFP = 0x10 - sysRTA_IFA = 0x20 - sysRTA_AUTHOR = 0x40 - sysRTA_BRD = 0x80 - - sysRTAX_DST = 0x0 - sysRTAX_GATEWAY = 0x1 - sysRTAX_NETMASK = 0x2 - 
sysRTAX_GENMASK = 0x3 - sysRTAX_IFP = 0x4 - sysRTAX_IFA = 0x5 - sysRTAX_AUTHOR = 0x6 - sysRTAX_BRD = 0x7 - sysRTAX_MAX = 0x8 -) - -const ( - sizeofIfMsghdrDarwin15 = 0x70 - sizeofIfaMsghdrDarwin15 = 0x14 - sizeofIfmaMsghdrDarwin15 = 0x10 - sizeofIfMsghdr2Darwin15 = 0xa0 - sizeofIfmaMsghdr2Darwin15 = 0x14 - sizeofIfDataDarwin15 = 0x60 - sizeofIfData64Darwin15 = 0x80 - - sizeofRtMsghdrDarwin15 = 0x5c - sizeofRtMsghdr2Darwin15 = 0x5c - sizeofRtMetricsDarwin15 = 0x38 - - sizeofSockaddrStorage = 0x80 - sizeofSockaddrInet = 0x10 - sizeofSockaddrInet6 = 0x1c -) diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go deleted file mode 100644 index 719c88d1..00000000 --- a/vendor/golang.org/x/net/route/zsys_dragonfly.go +++ /dev/null @@ -1,98 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_dragonfly.go - -package route - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_ROUTE = 0x11 - sysAF_LINK = 0x12 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 - - sysNET_RT_DUMP = 0x1 - sysNET_RT_FLAGS = 0x2 - sysNET_RT_IFLIST = 0x3 - sysNET_RT_MAXID = 0x4 -) - -const ( - sysCTL_MAXNAME = 0xc - - sysCTL_UNSPEC = 0x0 - sysCTL_KERN = 0x1 - sysCTL_VM = 0x2 - sysCTL_VFS = 0x3 - sysCTL_NET = 0x4 - sysCTL_DEBUG = 0x5 - sysCTL_HW = 0x6 - sysCTL_MACHDEP = 0x7 - sysCTL_USER = 0x8 - sysCTL_P1003_1B = 0x9 - sysCTL_LWKT = 0xa - sysCTL_MAXID = 0xb -) - -const ( - sysRTM_VERSION = 0x6 - - sysRTM_ADD = 0x1 - sysRTM_DELETE = 0x2 - sysRTM_CHANGE = 0x3 - sysRTM_GET = 0x4 - sysRTM_LOSING = 0x5 - sysRTM_REDIRECT = 0x6 - sysRTM_MISS = 0x7 - sysRTM_LOCK = 0x8 - sysRTM_OLDADD = 0x9 - sysRTM_OLDDEL = 0xa - sysRTM_RESOLVE = 0xb - sysRTM_NEWADDR = 0xc - sysRTM_DELADDR = 0xd - sysRTM_IFINFO = 0xe - sysRTM_NEWMADDR = 0xf - sysRTM_DELMADDR = 0x10 - sysRTM_IFANNOUNCE = 0x11 - sysRTM_IEEE80211 = 0x12 - - sysRTA_DST = 0x1 - sysRTA_GATEWAY = 0x2 - sysRTA_NETMASK = 0x4 - sysRTA_GENMASK = 0x8 - sysRTA_IFP = 0x10 - sysRTA_IFA = 0x20 - sysRTA_AUTHOR = 0x40 - sysRTA_BRD = 0x80 - sysRTA_MPLS1 = 0x100 - sysRTA_MPLS2 = 0x200 - sysRTA_MPLS3 = 0x400 - - sysRTAX_DST = 0x0 - sysRTAX_GATEWAY = 0x1 - sysRTAX_NETMASK = 0x2 - sysRTAX_GENMASK = 0x3 - sysRTAX_IFP = 0x4 - sysRTAX_IFA = 0x5 - sysRTAX_AUTHOR = 0x6 - sysRTAX_BRD = 0x7 - sysRTAX_MPLS1 = 0x8 - sysRTAX_MPLS2 = 0x9 - sysRTAX_MPLS3 = 0xa - sysRTAX_MAX = 0xb -) - -const ( - sizeofIfMsghdrDragonFlyBSD4 = 0xb0 - sizeofIfaMsghdrDragonFlyBSD4 = 0x14 - sizeofIfmaMsghdrDragonFlyBSD4 = 0x10 - sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18 - - sizeofRtMsghdrDragonFlyBSD4 = 0x98 - sizeofRtMetricsDragonFlyBSD4 = 0x70 - - sizeofSockaddrStorage = 0x80 - sizeofSockaddrInet = 0x10 - sizeofSockaddrInet6 = 0x1c -) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go deleted file mode 100644 index b03bc01f..00000000 --- a/vendor/golang.org/x/net/route/zsys_freebsd_386.go +++ /dev/null @@ -1,126 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_freebsd.go - -package route - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_ROUTE = 0x11 - sysAF_LINK = 0x12 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 - - sysNET_RT_DUMP = 0x1 - sysNET_RT_FLAGS = 0x2 - sysNET_RT_IFLIST = 0x3 - sysNET_RT_IFMALIST = 0x4 - sysNET_RT_IFLISTL = 0x5 -) - -const ( - sysCTL_MAXNAME = 0x18 - - sysCTL_UNSPEC = 0x0 - sysCTL_KERN = 0x1 - sysCTL_VM = 0x2 - sysCTL_VFS = 0x3 - sysCTL_NET = 0x4 - sysCTL_DEBUG = 0x5 - sysCTL_HW = 0x6 - sysCTL_MACHDEP = 0x7 - sysCTL_USER = 0x8 - sysCTL_P1003_1B = 
0x9 -) - -const ( - sysRTM_VERSION = 0x5 - - sysRTM_ADD = 0x1 - sysRTM_DELETE = 0x2 - sysRTM_CHANGE = 0x3 - sysRTM_GET = 0x4 - sysRTM_LOSING = 0x5 - sysRTM_REDIRECT = 0x6 - sysRTM_MISS = 0x7 - sysRTM_LOCK = 0x8 - sysRTM_RESOLVE = 0xb - sysRTM_NEWADDR = 0xc - sysRTM_DELADDR = 0xd - sysRTM_IFINFO = 0xe - sysRTM_NEWMADDR = 0xf - sysRTM_DELMADDR = 0x10 - sysRTM_IFANNOUNCE = 0x11 - sysRTM_IEEE80211 = 0x12 - - sysRTA_DST = 0x1 - sysRTA_GATEWAY = 0x2 - sysRTA_NETMASK = 0x4 - sysRTA_GENMASK = 0x8 - sysRTA_IFP = 0x10 - sysRTA_IFA = 0x20 - sysRTA_AUTHOR = 0x40 - sysRTA_BRD = 0x80 - - sysRTAX_DST = 0x0 - sysRTAX_GATEWAY = 0x1 - sysRTAX_NETMASK = 0x2 - sysRTAX_GENMASK = 0x3 - sysRTAX_IFP = 0x4 - sysRTAX_IFA = 0x5 - sysRTAX_AUTHOR = 0x6 - sysRTAX_BRD = 0x7 - sysRTAX_MAX = 0x8 -) - -const ( - sizeofIfMsghdrlFreeBSD10 = 0x68 - sizeofIfaMsghdrFreeBSD10 = 0x14 - sizeofIfaMsghdrlFreeBSD10 = 0x6c - sizeofIfmaMsghdrFreeBSD10 = 0x10 - sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 - - sizeofRtMsghdrFreeBSD10 = 0x5c - sizeofRtMetricsFreeBSD10 = 0x38 - - sizeofIfMsghdrFreeBSD7 = 0x60 - sizeofIfMsghdrFreeBSD8 = 0x60 - sizeofIfMsghdrFreeBSD9 = 0x60 - sizeofIfMsghdrFreeBSD10 = 0x64 - sizeofIfMsghdrFreeBSD11 = 0xa8 - - sizeofIfDataFreeBSD7 = 0x50 - sizeofIfDataFreeBSD8 = 0x50 - sizeofIfDataFreeBSD9 = 0x50 - sizeofIfDataFreeBSD10 = 0x54 - sizeofIfDataFreeBSD11 = 0x98 - - // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 - // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT - - sizeofIfMsghdrlFreeBSD10Emu = 0xb0 - sizeofIfaMsghdrFreeBSD10Emu = 0x14 - sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 - sizeofIfmaMsghdrFreeBSD10Emu = 0x10 - sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 - - sizeofRtMsghdrFreeBSD10Emu = 0x98 - sizeofRtMetricsFreeBSD10Emu = 0x70 - - sizeofIfMsghdrFreeBSD7Emu = 0xa8 - sizeofIfMsghdrFreeBSD8Emu = 0xa8 - sizeofIfMsghdrFreeBSD9Emu = 0xa8 - sizeofIfMsghdrFreeBSD10Emu = 0xa8 - sizeofIfMsghdrFreeBSD11Emu = 0xa8 - - sizeofIfDataFreeBSD7Emu = 0x98 - sizeofIfDataFreeBSD8Emu = 0x98 - sizeofIfDataFreeBSD9Emu = 0x98 - sizeofIfDataFreeBSD10Emu = 0x98 - sizeofIfDataFreeBSD11Emu = 0x98 - - sizeofSockaddrStorage = 0x80 - sizeofSockaddrInet = 0x10 - sizeofSockaddrInet6 = 0x1c -) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go deleted file mode 100644 index 0b675b3d..00000000 --- a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go +++ /dev/null @@ -1,123 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_freebsd.go - -package route - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_ROUTE = 0x11 - sysAF_LINK = 0x12 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 - - sysNET_RT_DUMP = 0x1 - sysNET_RT_FLAGS = 0x2 - sysNET_RT_IFLIST = 0x3 - sysNET_RT_IFMALIST = 0x4 - sysNET_RT_IFLISTL = 0x5 -) - -const ( - sysCTL_MAXNAME = 0x18 - - sysCTL_UNSPEC = 0x0 - sysCTL_KERN = 0x1 - sysCTL_VM = 0x2 - sysCTL_VFS = 0x3 - sysCTL_NET = 0x4 - sysCTL_DEBUG = 0x5 - sysCTL_HW = 0x6 - sysCTL_MACHDEP = 0x7 - sysCTL_USER = 0x8 - sysCTL_P1003_1B = 0x9 -) - -const ( - sysRTM_VERSION = 0x5 - - sysRTM_ADD = 0x1 - sysRTM_DELETE = 0x2 - sysRTM_CHANGE = 0x3 - sysRTM_GET = 0x4 - sysRTM_LOSING = 0x5 - sysRTM_REDIRECT = 0x6 - sysRTM_MISS = 0x7 - sysRTM_LOCK = 0x8 - sysRTM_RESOLVE = 0xb - sysRTM_NEWADDR = 0xc - sysRTM_DELADDR = 0xd - sysRTM_IFINFO = 0xe - sysRTM_NEWMADDR = 0xf - sysRTM_DELMADDR = 0x10 - sysRTM_IFANNOUNCE = 0x11 - sysRTM_IEEE80211 = 0x12 - - sysRTA_DST = 0x1 - sysRTA_GATEWAY = 0x2 - sysRTA_NETMASK = 0x4 - sysRTA_GENMASK = 0x8 - sysRTA_IFP = 0x10 - sysRTA_IFA = 
0x20 - sysRTA_AUTHOR = 0x40 - sysRTA_BRD = 0x80 - - sysRTAX_DST = 0x0 - sysRTAX_GATEWAY = 0x1 - sysRTAX_NETMASK = 0x2 - sysRTAX_GENMASK = 0x3 - sysRTAX_IFP = 0x4 - sysRTAX_IFA = 0x5 - sysRTAX_AUTHOR = 0x6 - sysRTAX_BRD = 0x7 - sysRTAX_MAX = 0x8 -) - -const ( - sizeofIfMsghdrlFreeBSD10 = 0xb0 - sizeofIfaMsghdrFreeBSD10 = 0x14 - sizeofIfaMsghdrlFreeBSD10 = 0xb0 - sizeofIfmaMsghdrFreeBSD10 = 0x10 - sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 - - sizeofRtMsghdrFreeBSD10 = 0x98 - sizeofRtMetricsFreeBSD10 = 0x70 - - sizeofIfMsghdrFreeBSD7 = 0xa8 - sizeofIfMsghdrFreeBSD8 = 0xa8 - sizeofIfMsghdrFreeBSD9 = 0xa8 - sizeofIfMsghdrFreeBSD10 = 0xa8 - sizeofIfMsghdrFreeBSD11 = 0xa8 - - sizeofIfDataFreeBSD7 = 0x98 - sizeofIfDataFreeBSD8 = 0x98 - sizeofIfDataFreeBSD9 = 0x98 - sizeofIfDataFreeBSD10 = 0x98 - sizeofIfDataFreeBSD11 = 0x98 - - sizeofIfMsghdrlFreeBSD10Emu = 0xb0 - sizeofIfaMsghdrFreeBSD10Emu = 0x14 - sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 - sizeofIfmaMsghdrFreeBSD10Emu = 0x10 - sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 - - sizeofRtMsghdrFreeBSD10Emu = 0x98 - sizeofRtMetricsFreeBSD10Emu = 0x70 - - sizeofIfMsghdrFreeBSD7Emu = 0xa8 - sizeofIfMsghdrFreeBSD8Emu = 0xa8 - sizeofIfMsghdrFreeBSD9Emu = 0xa8 - sizeofIfMsghdrFreeBSD10Emu = 0xa8 - sizeofIfMsghdrFreeBSD11Emu = 0xa8 - - sizeofIfDataFreeBSD7Emu = 0x98 - sizeofIfDataFreeBSD8Emu = 0x98 - sizeofIfDataFreeBSD9Emu = 0x98 - sizeofIfDataFreeBSD10Emu = 0x98 - sizeofIfDataFreeBSD11Emu = 0x98 - - sizeofSockaddrStorage = 0x80 - sizeofSockaddrInet = 0x10 - sizeofSockaddrInet6 = 0x1c -) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go deleted file mode 100644 index 58f8ea16..00000000 --- a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go +++ /dev/null @@ -1,123 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_freebsd.go - -package route - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_ROUTE = 0x11 - sysAF_LINK = 0x12 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 - - sysNET_RT_DUMP = 0x1 - sysNET_RT_FLAGS = 0x2 - sysNET_RT_IFLIST = 0x3 - sysNET_RT_IFMALIST = 0x4 - sysNET_RT_IFLISTL = 0x5 -) - -const ( - sysCTL_MAXNAME = 0x18 - - sysCTL_UNSPEC = 0x0 - sysCTL_KERN = 0x1 - sysCTL_VM = 0x2 - sysCTL_VFS = 0x3 - sysCTL_NET = 0x4 - sysCTL_DEBUG = 0x5 - sysCTL_HW = 0x6 - sysCTL_MACHDEP = 0x7 - sysCTL_USER = 0x8 - sysCTL_P1003_1B = 0x9 -) - -const ( - sysRTM_VERSION = 0x5 - - sysRTM_ADD = 0x1 - sysRTM_DELETE = 0x2 - sysRTM_CHANGE = 0x3 - sysRTM_GET = 0x4 - sysRTM_LOSING = 0x5 - sysRTM_REDIRECT = 0x6 - sysRTM_MISS = 0x7 - sysRTM_LOCK = 0x8 - sysRTM_RESOLVE = 0xb - sysRTM_NEWADDR = 0xc - sysRTM_DELADDR = 0xd - sysRTM_IFINFO = 0xe - sysRTM_NEWMADDR = 0xf - sysRTM_DELMADDR = 0x10 - sysRTM_IFANNOUNCE = 0x11 - sysRTM_IEEE80211 = 0x12 - - sysRTA_DST = 0x1 - sysRTA_GATEWAY = 0x2 - sysRTA_NETMASK = 0x4 - sysRTA_GENMASK = 0x8 - sysRTA_IFP = 0x10 - sysRTA_IFA = 0x20 - sysRTA_AUTHOR = 0x40 - sysRTA_BRD = 0x80 - - sysRTAX_DST = 0x0 - sysRTAX_GATEWAY = 0x1 - sysRTAX_NETMASK = 0x2 - sysRTAX_GENMASK = 0x3 - sysRTAX_IFP = 0x4 - sysRTAX_IFA = 0x5 - sysRTAX_AUTHOR = 0x6 - sysRTAX_BRD = 0x7 - sysRTAX_MAX = 0x8 -) - -const ( - sizeofIfMsghdrlFreeBSD10 = 0x68 - sizeofIfaMsghdrFreeBSD10 = 0x14 - sizeofIfaMsghdrlFreeBSD10 = 0x6c - sizeofIfmaMsghdrFreeBSD10 = 0x10 - sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 - - sizeofRtMsghdrFreeBSD10 = 0x5c - sizeofRtMetricsFreeBSD10 = 0x38 - - sizeofIfMsghdrFreeBSD7 = 0x70 - sizeofIfMsghdrFreeBSD8 = 0x70 - sizeofIfMsghdrFreeBSD9 = 0x70 - sizeofIfMsghdrFreeBSD10 = 0x70 - 
sizeofIfMsghdrFreeBSD11 = 0xa8 - - sizeofIfDataFreeBSD7 = 0x60 - sizeofIfDataFreeBSD8 = 0x60 - sizeofIfDataFreeBSD9 = 0x60 - sizeofIfDataFreeBSD10 = 0x60 - sizeofIfDataFreeBSD11 = 0x98 - - sizeofIfMsghdrlFreeBSD10Emu = 0x68 - sizeofIfaMsghdrFreeBSD10Emu = 0x14 - sizeofIfaMsghdrlFreeBSD10Emu = 0x6c - sizeofIfmaMsghdrFreeBSD10Emu = 0x10 - sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 - - sizeofRtMsghdrFreeBSD10Emu = 0x5c - sizeofRtMetricsFreeBSD10Emu = 0x38 - - sizeofIfMsghdrFreeBSD7Emu = 0x70 - sizeofIfMsghdrFreeBSD8Emu = 0x70 - sizeofIfMsghdrFreeBSD9Emu = 0x70 - sizeofIfMsghdrFreeBSD10Emu = 0x70 - sizeofIfMsghdrFreeBSD11Emu = 0xa8 - - sizeofIfDataFreeBSD7Emu = 0x60 - sizeofIfDataFreeBSD8Emu = 0x60 - sizeofIfDataFreeBSD9Emu = 0x60 - sizeofIfDataFreeBSD10Emu = 0x60 - sizeofIfDataFreeBSD11Emu = 0x98 - - sizeofSockaddrStorage = 0x80 - sizeofSockaddrInet = 0x10 - sizeofSockaddrInet6 = 0x1c -) diff --git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go deleted file mode 100644 index e0df45e8..00000000 --- a/vendor/golang.org/x/net/route/zsys_netbsd.go +++ /dev/null @@ -1,97 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_netbsd.go - -package route - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_ROUTE = 0x22 - sysAF_LINK = 0x12 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 - - sysNET_RT_DUMP = 0x1 - sysNET_RT_FLAGS = 0x2 - sysNET_RT_IFLIST = 0x5 - sysNET_RT_MAXID = 0x6 -) - -const ( - sysCTL_MAXNAME = 0xc - - sysCTL_UNSPEC = 0x0 - sysCTL_KERN = 0x1 - sysCTL_VM = 0x2 - sysCTL_VFS = 0x3 - sysCTL_NET = 0x4 - sysCTL_DEBUG = 0x5 - sysCTL_HW = 0x6 - sysCTL_MACHDEP = 0x7 - sysCTL_USER = 0x8 - sysCTL_DDB = 0x9 - sysCTL_PROC = 0xa - sysCTL_VENDOR = 0xb - sysCTL_EMUL = 0xc - sysCTL_SECURITY = 0xd - sysCTL_MAXID = 0xe -) - -const ( - sysRTM_VERSION = 0x4 - - sysRTM_ADD = 0x1 - sysRTM_DELETE = 0x2 - sysRTM_CHANGE = 0x3 - sysRTM_GET = 0x4 - sysRTM_LOSING = 0x5 - sysRTM_REDIRECT = 0x6 - sysRTM_MISS = 0x7 - sysRTM_LOCK = 0x8 - sysRTM_OLDADD = 0x9 - sysRTM_OLDDEL = 0xa - sysRTM_RESOLVE = 0xb - sysRTM_NEWADDR = 0xc - sysRTM_DELADDR = 0xd - sysRTM_IFANNOUNCE = 0x10 - sysRTM_IEEE80211 = 0x11 - sysRTM_SETGATE = 0x12 - sysRTM_LLINFO_UPD = 0x13 - sysRTM_IFINFO = 0x14 - sysRTM_CHGADDR = 0x15 - - sysRTA_DST = 0x1 - sysRTA_GATEWAY = 0x2 - sysRTA_NETMASK = 0x4 - sysRTA_GENMASK = 0x8 - sysRTA_IFP = 0x10 - sysRTA_IFA = 0x20 - sysRTA_AUTHOR = 0x40 - sysRTA_BRD = 0x80 - sysRTA_TAG = 0x100 - - sysRTAX_DST = 0x0 - sysRTAX_GATEWAY = 0x1 - sysRTAX_NETMASK = 0x2 - sysRTAX_GENMASK = 0x3 - sysRTAX_IFP = 0x4 - sysRTAX_IFA = 0x5 - sysRTAX_AUTHOR = 0x6 - sysRTAX_BRD = 0x7 - sysRTAX_TAG = 0x8 - sysRTAX_MAX = 0x9 -) - -const ( - sizeofIfMsghdrNetBSD7 = 0x98 - sizeofIfaMsghdrNetBSD7 = 0x18 - sizeofIfAnnouncemsghdrNetBSD7 = 0x18 - - sizeofRtMsghdrNetBSD7 = 0x78 - sizeofRtMetricsNetBSD7 = 0x50 - - sizeofSockaddrStorage = 0x80 - sizeofSockaddrInet = 0x10 - sizeofSockaddrInet6 = 0x1c -) diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go deleted file mode 100644 index db8c8efb..00000000 --- a/vendor/golang.org/x/net/route/zsys_openbsd.go +++ /dev/null @@ -1,101 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs defs_openbsd.go - -package route - -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_ROUTE = 0x11 - sysAF_LINK = 0x12 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 - - sysNET_RT_DUMP = 0x1 - sysNET_RT_FLAGS = 0x2 - sysNET_RT_IFLIST = 0x3 - sysNET_RT_STATS = 0x4 - sysNET_RT_TABLE = 0x5 - 
sysNET_RT_IFNAMES = 0x6 - sysNET_RT_MAXID = 0x7 -) - -const ( - sysCTL_MAXNAME = 0xc - - sysCTL_UNSPEC = 0x0 - sysCTL_KERN = 0x1 - sysCTL_VM = 0x2 - sysCTL_FS = 0x3 - sysCTL_NET = 0x4 - sysCTL_DEBUG = 0x5 - sysCTL_HW = 0x6 - sysCTL_MACHDEP = 0x7 - sysCTL_DDB = 0x9 - sysCTL_VFS = 0xa - sysCTL_MAXID = 0xb -) - -const ( - sysRTM_VERSION = 0x5 - - sysRTM_ADD = 0x1 - sysRTM_DELETE = 0x2 - sysRTM_CHANGE = 0x3 - sysRTM_GET = 0x4 - sysRTM_LOSING = 0x5 - sysRTM_REDIRECT = 0x6 - sysRTM_MISS = 0x7 - sysRTM_LOCK = 0x8 - sysRTM_RESOLVE = 0xb - sysRTM_NEWADDR = 0xc - sysRTM_DELADDR = 0xd - sysRTM_IFINFO = 0xe - sysRTM_IFANNOUNCE = 0xf - sysRTM_DESYNC = 0x10 - sysRTM_INVALIDATE = 0x11 - sysRTM_BFD = 0x12 - sysRTM_PROPOSAL = 0x13 - - sysRTA_DST = 0x1 - sysRTA_GATEWAY = 0x2 - sysRTA_NETMASK = 0x4 - sysRTA_GENMASK = 0x8 - sysRTA_IFP = 0x10 - sysRTA_IFA = 0x20 - sysRTA_AUTHOR = 0x40 - sysRTA_BRD = 0x80 - sysRTA_SRC = 0x100 - sysRTA_SRCMASK = 0x200 - sysRTA_LABEL = 0x400 - sysRTA_BFD = 0x800 - sysRTA_DNS = 0x1000 - sysRTA_STATIC = 0x2000 - sysRTA_SEARCH = 0x4000 - - sysRTAX_DST = 0x0 - sysRTAX_GATEWAY = 0x1 - sysRTAX_NETMASK = 0x2 - sysRTAX_GENMASK = 0x3 - sysRTAX_IFP = 0x4 - sysRTAX_IFA = 0x5 - sysRTAX_AUTHOR = 0x6 - sysRTAX_BRD = 0x7 - sysRTAX_SRC = 0x8 - sysRTAX_SRCMASK = 0x9 - sysRTAX_LABEL = 0xa - sysRTAX_BFD = 0xb - sysRTAX_DNS = 0xc - sysRTAX_STATIC = 0xd - sysRTAX_SEARCH = 0xe - sysRTAX_MAX = 0xf -) - -const ( - sizeofRtMsghdr = 0x60 - - sizeofSockaddrStorage = 0x100 - sizeofSockaddrInet = 0x10 - sizeofSockaddrInet6 = 0x1c -) diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index c646a695..00000000 --- a/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Events handler. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. 
- f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl().Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. -func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. 
-func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. 
- el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns a event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking. -func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -var eventsTmplCache *template.Template -var eventsTmplOnce sync.Once - -func eventsTmpl() *template.Template { - eventsTmplOnce.Do(func() { - eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, - }).Parse(eventsHTML)) - }) - return eventsTmplCache -} - -const eventsHTML = ` - - - events - - - - -

[eventsHTML template body: the deleted lines held the HTML page served at /debug/events — an event-family table with per-bucket error counts, and a per-event-log table with When/Elapsed/What columns plus the log's call stack; the HTML markup was not preserved in this extraction, only the Go template actions survived]
    -{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index 9bf4286c..00000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - "sync" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl().Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -var distTmplCache *template.Template -var distTmplOnce sync.Once - -func distTmpl() *template.Template { - distTmplOnce.Do(func() { - // Input: data - distTmplCache = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
[distTmpl template body: the deleted lines held the HTML table rendering a histogram — a Count/Mean/StdDev/Median header row and per-bucket rows with the [lower,upper) range, count, percentage, and cumulative percentage; the HTML markup was not preserved in this extraction, only the Go template actions survived]
    -`)) - }) - return distTmplCache -} diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go deleted file mode 100644 index d384b933..00000000 --- a/vendor/golang.org/x/net/trace/histogram_test.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "math" - "testing" -) - -type sumTest struct { - value int64 - sum int64 - sumOfSquares float64 - total int64 -} - -var sumTests = []sumTest{ - {100, 100, 10000, 1}, - {50, 150, 12500, 2}, - {50, 200, 15000, 3}, - {50, 250, 17500, 4}, -} - -type bucketingTest struct { - in int64 - log int - bucket int -} - -var bucketingTests = []bucketingTest{ - {0, 0, 0}, - {1, 1, 0}, - {2, 2, 1}, - {3, 2, 1}, - {4, 3, 2}, - {1000, 10, 9}, - {1023, 10, 9}, - {1024, 11, 10}, - {1000000, 20, 19}, -} - -type multiplyTest struct { - in int64 - ratio float64 - expectedSum int64 - expectedTotal int64 - expectedSumOfSquares float64 -} - -var multiplyTests = []multiplyTest{ - {15, 2.5, 37, 2, 562.5}, - {128, 4.6, 758, 13, 77953.9}, -} - -type percentileTest struct { - fraction float64 - expected int64 -} - -var percentileTests = []percentileTest{ - {0.25, 48}, - {0.5, 96}, - {0.6, 109}, - {0.75, 128}, - {0.90, 205}, - {0.95, 230}, - {0.99, 256}, -} - -func TestSum(t *testing.T) { - var h histogram - - for _, test := range sumTests { - h.addMeasurement(test.value) - sum := h.sum - if sum != test.sum { - t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) - } - - sumOfSquares := h.sumOfSquares - if sumOfSquares != test.sumOfSquares { - t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) - } - - total := h.total() - if total != test.total { - t.Errorf("h.Total = %v WANT: %v", total, test.total) - } - } -} - -func TestMultiply(t *testing.T) { - var h histogram - for i, test := range multiplyTests { - h.addMeasurement(test.in) - h.Multiply(test.ratio) - if h.sum != test.expectedSum { - t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) - } - if h.total() != test.expectedTotal { - t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) - } - if h.sumOfSquares != test.expectedSumOfSquares { - t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) - } - } -} - -func TestBucketingFunctions(t *testing.T) { - for _, test := range bucketingTests { - log := log2(test.in) - if log != test.log { - t.Errorf("log2 = %v WANT: %v", log, test.log) - } - - bucket := getBucket(test.in) - if bucket != test.bucket { - t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) - } - } -} - -func TestAverage(t *testing.T) { - a := new(histogram) - average := a.average() - if average != 0 { - t.Errorf("Average of empty histogram was %v WANT: 0", average) - } - - a.addMeasurement(1) - a.addMeasurement(1) - a.addMeasurement(3) - const expected = float64(5) / float64(3) - average = a.average() - - if !isApproximate(average, expected) { - t.Errorf("Average = %g WANT: %v", average, expected) - } -} - -func TestStandardDeviation(t *testing.T) { - a := new(histogram) - add(a, 10, 1<<4) - add(a, 10, 1<<5) - add(a, 10, 1<<6) - stdDev := a.standardDeviation() - const expected = 19.95 - - if !isApproximate(stdDev, expected) { - t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) - } - - // No values - a = new(histogram) - stdDev = a.standardDeviation() - - if !isApproximate(stdDev, 0) { 
- t.Errorf("StandardDeviation = %v WANT: 0", stdDev) - } - - add(a, 1, 1<<4) - if !isApproximate(stdDev, 0) { - t.Errorf("StandardDeviation = %v WANT: 0", stdDev) - } - - add(a, 10, 1<<4) - if !isApproximate(stdDev, 0) { - t.Errorf("StandardDeviation = %v WANT: 0", stdDev) - } -} - -func TestPercentileBoundary(t *testing.T) { - a := new(histogram) - add(a, 5, 1<<4) - add(a, 10, 1<<6) - add(a, 5, 1<<7) - - for _, test := range percentileTests { - percentile := a.percentileBoundary(test.fraction) - if percentile != test.expected { - t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) - } - } -} - -func TestCopyFrom(t *testing.T) { - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} - - a.CopyFrom(&b) - - if a.String() != b.String() { - t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) - } -} - -func TestClear(t *testing.T) { - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - - a.Clear() - - expected := "0, 0.000000, 0, 0, []" - if a.String() != expected { - t.Errorf("a.String = %s WANT %s", a.String(), expected) - } -} - -func TestNew(t *testing.T) { - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - b := a.New() - - expected := "0, 0.000000, 0, 0, []" - if b.(*histogram).String() != expected { - t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) - } -} - -func TestAdd(t *testing.T) { - // The tests here depend on the associativity of addMeasurement and Add. 
- // Add empty observation - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - b := a.New() - - expected := a.String() - a.Add(b) - if a.String() != expected { - t.Errorf("a.String = %s WANT: %s", a.String(), expected) - } - - // Add same bucketed value, no new buckets - c := new(histogram) - d := new(histogram) - e := new(histogram) - c.addMeasurement(12) - d.addMeasurement(11) - e.addMeasurement(12) - e.addMeasurement(11) - c.Add(d) - if c.String() != e.String() { - t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) - } - - // Add bucketed values - f := new(histogram) - g := new(histogram) - h := new(histogram) - f.addMeasurement(4) - f.addMeasurement(12) - f.addMeasurement(100) - g.addMeasurement(18) - g.addMeasurement(36) - g.addMeasurement(255) - h.addMeasurement(4) - h.addMeasurement(12) - h.addMeasurement(100) - h.addMeasurement(18) - h.addMeasurement(36) - h.addMeasurement(255) - f.Add(g) - if f.String() != h.String() { - t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) - } - - // add buckets to no buckets - i := new(histogram) - j := new(histogram) - k := new(histogram) - j.addMeasurement(18) - j.addMeasurement(36) - j.addMeasurement(255) - k.addMeasurement(18) - k.addMeasurement(36) - k.addMeasurement(255) - i.Add(j) - if i.String() != k.String() { - t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) - } - - // add buckets to single value (no overlap) - l := new(histogram) - m := new(histogram) - n := new(histogram) - l.addMeasurement(0) - m.addMeasurement(18) - m.addMeasurement(36) - m.addMeasurement(255) - n.addMeasurement(0) - n.addMeasurement(18) - n.addMeasurement(36) - n.addMeasurement(255) - l.Add(m) - if l.String() != n.String() { - t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) - } - - // mixed order - o := new(histogram) - p := new(histogram) - o.addMeasurement(0) - o.addMeasurement(2) - o.addMeasurement(0) - p.addMeasurement(0) - p.addMeasurement(0) - p.addMeasurement(2) - if o.String() != p.String() { - t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) - } -} - -func add(h *histogram, times int, val int64) { - for i := 0; i < times; i++ { - h.addMeasurement(val) - } -} - -func isApproximate(x, y float64) bool { - return math.Abs(x-y) < 1e-2 -} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index bb72a527..00000000 --- a/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1082 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides histogram of request duration -for each family. 
- -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace // import "golang.org/x/net/trace" - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customize its authorization requirements. -// -// The default AuthRequest function returns (true, true) if and only if the request -// comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port. - host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - // TODO(jbd): Serve Traces from /debug/traces in the future? - // There is no requirement for a request to be present to have traces. - http.HandleFunc("/debug/requests", Traces) - http.HandleFunc("/debug/events", Events) -} - -// Traces responds with traces from the program. -// The package initialization registers it in http.DefaultServeMux -// at /debug/requests. -// -// It performs authorization by running AuthRequest. -func Traces(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) -} - -// Events responds with a page of events collected by EventLogs. -// The package initialization registers it in http.DefaultServeMux -// at /debug/events. -// -// It performs authorization by running AuthRequest. -func Events(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking. The request may be nil. 
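A companion sketch for the EventLog side described above; the family, title, and polled URL are illustrative assumptions, and Finish would be called when the owning object is torn down, as in the Fetcher example:

package main

import (
    "log"
    "net/http"

    "golang.org/x/net/trace"
)

func main() {
    // One EventLog per long-lived object; its entries appear on /debug/events,
    // grouped by family and by time since the last Errorf.
    el := trace.NewEventLog("example.Fetcher", "example.com")

    resp, err := http.Get("http://example.com/")
    if err != nil {
        el.Errorf("Get = %v", err)
    } else {
        el.Printf("Get = %s", resp.Status)
        resp.Body.Close()
    }

    // el.Finish() belongs wherever the owning object is closed (see the
    // Fetcher.Close example above). /debug/events itself is registered on
    // http.DefaultServeMux by this package's init.
    log.Fatal(http.ListenAndServe("localhost:8080", nil))
}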
-// -// Most users will use the Traces handler. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. - if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. - data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// Trace represents an active request. 
-type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. - SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.maxEvents = maxEventsPerTrace - tr.events = tr.eventsBuf[:0] - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - tr.Elapsed = time.Now().Sub(tr.Start) - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. 
- completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. -} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. -func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. 
-type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. - b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. -func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - Sensitive bool // whether this event contains sensitive information - What interface{} // string or fmt.Stringer -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Timing information. 
- Start time.Time - Elapsed time.Duration // zero while active - - // Trace information if non-zero. - traceID uint64 - spanID uint64 - - // Whether this trace resulted in an error. - IsError bool - - // Append-only sequence of events (modulo discards). - mu sync.RWMutex - events []event - maxEvents int - - refs int32 // how many buckets this is in - recycler func(interface{}) - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set - - eventsBuf [4]event // preallocated buffer in case we only log a few events -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.maxEvents = 0 - tr.events = nil - tr.refs = 0 - tr.recycler = nil - tr.disc = 0 - tr.finishStack = nil - for i := range tr.eventsBuf { - tr.eventsBuf[i] = event{} - } -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. -// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a trace.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < tr.maxEvents { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((tr.maxEvents - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. 
- tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[tr.maxEvents-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { tr.IsError = true } - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.recycler = f -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.traceID, tr.spanID = traceID, spanID -} - -func (tr *trace) SetMaxEvents(m int) { - // Always keep at least three events: first, discarded count, last. - if len(tr.events) == 0 && m > 3 { - tr.maxEvents = m - } -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - t := tr.Elapsed - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? - -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmplCache *template.Template -var pageTmplOnce sync.Once - -func pageTmpl() *template.Template { - pageTmplOnce.Do(func() { - pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, - }).Parse(pageHTML)) - }) - return pageTmplCache -} - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

[pageHTML template body: the HTML markup was lost in extraction and is not reproduced here. The surviving fragments show that it defines the "Prolog", "StatusTable", and "Epilog" sub-templates of the /debug/requests page: a per-family summary table with the active-trace count, the latency buckets ("[{{.Cond}}]"), and [minute]/[hour]/[total] links; the listing of the selected traces with When, Elapsed (s), and title columns, printing "[redacted]" for sensitive events unless ShowSensitive is set; and the latency histogram ({{$.Histogram}}) for the selected window.]

    -{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go deleted file mode 100644 index d6081911..00000000 --- a/vendor/golang.org/x/net/trace/trace_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package trace - -import "golang.org/x/net/context" - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go deleted file mode 100644 index df6e1fba..00000000 --- a/vendor/golang.org/x/net/trace/trace_go17.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package trace - -import "context" - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go deleted file mode 100644 index bfd9dfe9..00000000 --- a/vendor/golang.org/x/net/trace/trace_test.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "net/http" - "reflect" - "testing" -) - -type s struct{} - -func (s) String() string { return "lazy string" } - -// TestReset checks whether all the fields are zeroed after reset. -func TestReset(t *testing.T) { - tr := New("foo", "bar") - tr.LazyLog(s{}, false) - tr.LazyPrintf("%d", 1) - tr.SetRecycler(func(_ interface{}) {}) - tr.SetTraceInfo(3, 4) - tr.SetMaxEvents(100) - tr.SetError() - tr.Finish() - - tr.(*trace).reset() - - if !reflect.DeepEqual(tr, new(trace)) { - t.Errorf("reset didn't clear all fields: %+v", tr) - } -} - -// TestResetLog checks whether all the fields are zeroed after reset. 
-func TestResetLog(t *testing.T) { - el := NewEventLog("foo", "bar") - el.Printf("message") - el.Errorf("error") - el.Finish() - - el.(*eventLog).reset() - - if !reflect.DeepEqual(el, new(eventLog)) { - t.Errorf("reset didn't clear all fields: %+v", el) - } -} - -func TestAuthRequest(t *testing.T) { - testCases := []struct { - host string - want bool - }{ - {host: "192.168.23.1", want: false}, - {host: "192.168.23.1:8080", want: false}, - {host: "malformed remote addr", want: false}, - {host: "localhost", want: true}, - {host: "localhost:8080", want: true}, - {host: "127.0.0.1", want: true}, - {host: "127.0.0.1:8080", want: true}, - {host: "::1", want: true}, - {host: "[::1]:8080", want: true}, - } - for _, tt := range testCases { - req := &http.Request{RemoteAddr: tt.host} - any, sensitive := AuthRequest(req) - if any != tt.want || sensitive != tt.want { - t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) - } - } -} - -// TestParseTemplate checks that all templates used by this package are valid -// as they are parsed on first usage -func TestParseTemplate(t *testing.T) { - if tmpl := distTmpl(); tmpl == nil { - t.Error("invalid template returned from distTmpl()") - } - if tmpl := pageTmpl(); tmpl == nil { - t.Error("invalid template returned from pageTmpl()") - } - if tmpl := eventsTmpl(); tmpl == nil { - t.Error("invalid template returned from eventsTmpl()") - } -} - -func benchmarkTrace(b *testing.B, maxEvents, numEvents int) { - numSpans := (b.N + numEvents + 1) / numEvents - - for i := 0; i < numSpans; i++ { - tr := New("test", "test") - tr.SetMaxEvents(maxEvents) - for j := 0; j < numEvents; j++ { - tr.LazyPrintf("%d", j) - } - tr.Finish() - } -} - -func BenchmarkTrace_Default_2(b *testing.B) { - benchmarkTrace(b, 0, 2) -} - -func BenchmarkTrace_Default_10(b *testing.B) { - benchmarkTrace(b, 0, 10) -} - -func BenchmarkTrace_Default_100(b *testing.B) { - benchmarkTrace(b, 0, 100) -} - -func BenchmarkTrace_Default_1000(b *testing.B) { - benchmarkTrace(b, 0, 1000) -} - -func BenchmarkTrace_Default_10000(b *testing.B) { - benchmarkTrace(b, 0, 10000) -} - -func BenchmarkTrace_10_2(b *testing.B) { - benchmarkTrace(b, 10, 2) -} - -func BenchmarkTrace_10_10(b *testing.B) { - benchmarkTrace(b, 10, 10) -} - -func BenchmarkTrace_10_100(b *testing.B) { - benchmarkTrace(b, 10, 100) -} - -func BenchmarkTrace_10_1000(b *testing.B) { - benchmarkTrace(b, 10, 1000) -} - -func BenchmarkTrace_10_10000(b *testing.B) { - benchmarkTrace(b, 10, 10000) -} - -func BenchmarkTrace_100_2(b *testing.B) { - benchmarkTrace(b, 100, 2) -} - -func BenchmarkTrace_100_10(b *testing.B) { - benchmarkTrace(b, 100, 10) -} - -func BenchmarkTrace_100_100(b *testing.B) { - benchmarkTrace(b, 100, 100) -} - -func BenchmarkTrace_100_1000(b *testing.B) { - benchmarkTrace(b, 100, 1000) -} - -func BenchmarkTrace_100_10000(b *testing.B) { - benchmarkTrace(b, 100, 10000) -} - -func BenchmarkTrace_1000_2(b *testing.B) { - benchmarkTrace(b, 1000, 2) -} - -func BenchmarkTrace_1000_10(b *testing.B) { - benchmarkTrace(b, 1000, 10) -} - -func BenchmarkTrace_1000_100(b *testing.B) { - benchmarkTrace(b, 1000, 100) -} - -func BenchmarkTrace_1000_1000(b *testing.B) { - benchmarkTrace(b, 1000, 1000) -} - -func BenchmarkTrace_1000_10000(b *testing.B) { - benchmarkTrace(b, 1000, 10000) -} diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go deleted file mode 100644 index 748118dd..00000000 --- a/vendor/golang.org/x/net/webdav/file.go +++ /dev/null @@ -1,796 +0,0 @@ 
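The file deleted below declares webdav's FileSystem interface along with the OS-backed Dir and the in-memory NewMemFS. A minimal sketch of that API follows, assuming illustrative path names and permissions and collapsing error handling into log.Fatal:

package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/net/context"
    "golang.org/x/net/webdav"
)

func main() {
    ctx := context.Background()
    // In-memory FileSystem; webdav.Dir(".") would back it with the OS instead.
    fs := webdav.NewMemFS()

    if err := fs.Mkdir(ctx, "/notes", 0755); err != nil {
        log.Fatal(err)
    }
    f, err := fs.OpenFile(ctx, "/notes/todo.txt", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := f.Write([]byte("hello webdav\n")); err != nil {
        log.Fatal(err)
    }
    if err := f.Close(); err != nil {
        log.Fatal(err)
    }

    fi, err := fs.Stat(ctx, "/notes/todo.txt")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(fi.Name(), fi.Size()) // e.g. "todo.txt 13"
}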
-// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webdav - -import ( - "encoding/xml" - "io" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "golang.org/x/net/context" -) - -// slashClean is equivalent to but slightly more efficient than -// path.Clean("/" + name). -func slashClean(name string) string { - if name == "" || name[0] != '/' { - name = "/" + name - } - return path.Clean(name) -} - -// A FileSystem implements access to a collection of named files. The elements -// in a file path are separated by slash ('/', U+002F) characters, regardless -// of host operating system convention. -// -// Each method has the same semantics as the os package's function of the same -// name. -// -// Note that the os.Rename documentation says that "OS-specific restrictions -// might apply". In particular, whether or not renaming a file or directory -// overwriting another existing file or directory is an error is OS-dependent. -type FileSystem interface { - Mkdir(ctx context.Context, name string, perm os.FileMode) error - OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) - RemoveAll(ctx context.Context, name string) error - Rename(ctx context.Context, oldName, newName string) error - Stat(ctx context.Context, name string) (os.FileInfo, error) -} - -// A File is returned by a FileSystem's OpenFile method and can be served by a -// Handler. -// -// A File may optionally implement the DeadPropsHolder interface, if it can -// load and save dead properties. -type File interface { - http.File - io.Writer -} - -// A Dir implements FileSystem using the native file system restricted to a -// specific directory tree. -// -// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's -// string value is a filename on the native file system, not a URL, so it is -// separated by filepath.Separator, which isn't necessarily '/'. -// -// An empty Dir is treated as ".". -type Dir string - -func (d Dir) resolve(name string) string { - // This implementation is based on Dir.Open's code in the standard net/http package. - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return "" - } - dir := string(d) - if dir == "" { - dir = "." - } - return filepath.Join(dir, filepath.FromSlash(slashClean(name))) -} - -func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { - if name = d.resolve(name); name == "" { - return os.ErrNotExist - } - return os.Mkdir(name, perm) -} - -func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { - if name = d.resolve(name); name == "" { - return nil, os.ErrNotExist - } - f, err := os.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - return f, nil -} - -func (d Dir) RemoveAll(ctx context.Context, name string) error { - if name = d.resolve(name); name == "" { - return os.ErrNotExist - } - if name == filepath.Clean(string(d)) { - // Prohibit removing the virtual root directory. 
- return os.ErrInvalid - } - return os.RemoveAll(name) -} - -func (d Dir) Rename(ctx context.Context, oldName, newName string) error { - if oldName = d.resolve(oldName); oldName == "" { - return os.ErrNotExist - } - if newName = d.resolve(newName); newName == "" { - return os.ErrNotExist - } - if root := filepath.Clean(string(d)); root == oldName || root == newName { - // Prohibit renaming from or to the virtual root directory. - return os.ErrInvalid - } - return os.Rename(oldName, newName) -} - -func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { - if name = d.resolve(name); name == "" { - return nil, os.ErrNotExist - } - return os.Stat(name) -} - -// NewMemFS returns a new in-memory FileSystem implementation. -func NewMemFS() FileSystem { - return &memFS{ - root: memFSNode{ - children: make(map[string]*memFSNode), - mode: 0660 | os.ModeDir, - modTime: time.Now(), - }, - } -} - -// A memFS implements FileSystem, storing all metadata and actual file data -// in-memory. No limits on filesystem size are used, so it is not recommended -// this be used where the clients are untrusted. -// -// Concurrent access is permitted. The tree structure is protected by a mutex, -// and each node's contents and metadata are protected by a per-node mutex. -// -// TODO: Enforce file permissions. -type memFS struct { - mu sync.Mutex - root memFSNode -} - -// TODO: clean up and rationalize the walk/find code. - -// walk walks the directory tree for the fullname, calling f at each step. If f -// returns an error, the walk will be aborted and return that same error. -// -// dir is the directory at that step, frag is the name fragment, and final is -// whether it is the final step. For example, walking "/foo/bar/x" will result -// in 3 calls to f: -// - "/", "foo", false -// - "/foo/", "bar", false -// - "/foo/bar/", "x", true -// The frag argument will be empty only if dir is the root node and the walk -// ends at that root node. -func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { - original := fullname - fullname = slashClean(fullname) - - // Strip any leading "/"s to make fullname a relative path, as the walk - // starts at fs.root. - if fullname[0] == '/' { - fullname = fullname[1:] - } - dir := &fs.root - - for { - frag, remaining := fullname, "" - i := strings.IndexRune(fullname, '/') - final := i < 0 - if !final { - frag, remaining = fullname[:i], fullname[i+1:] - } - if frag == "" && dir != &fs.root { - panic("webdav: empty path fragment for a clean path") - } - if err := f(dir, frag, final); err != nil { - return &os.PathError{ - Op: op, - Path: original, - Err: err, - } - } - if final { - break - } - child := dir.children[frag] - if child == nil { - return &os.PathError{ - Op: op, - Path: original, - Err: os.ErrNotExist, - } - } - if !child.mode.IsDir() { - return &os.PathError{ - Op: op, - Path: original, - Err: os.ErrInvalid, - } - } - dir, fullname = child, remaining - } - return nil -} - -// find returns the parent of the named node and the relative name fragment -// from the parent to the child. For example, if finding "/foo/bar/baz" then -// parent will be the node for "/foo/bar" and frag will be "baz". -// -// If the fullname names the root node, then parent, frag and err will be zero. -// -// find returns an error if the parent does not already exist or the parent -// isn't a directory, but it will not return an error per se if the child does -// not already exist. 
The error returned is either nil or an *os.PathError -// whose Op is op. -func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { - err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { - if !final { - return nil - } - if frag0 != "" { - parent, frag = parent0, frag0 - } - return nil - }) - return parent, frag, err -} - -func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { - fs.mu.Lock() - defer fs.mu.Unlock() - - dir, frag, err := fs.find("mkdir", name) - if err != nil { - return err - } - if dir == nil { - // We can't create the root. - return os.ErrInvalid - } - if _, ok := dir.children[frag]; ok { - return os.ErrExist - } - dir.children[frag] = &memFSNode{ - children: make(map[string]*memFSNode), - mode: perm.Perm() | os.ModeDir, - modTime: time.Now(), - } - return nil -} - -func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - - dir, frag, err := fs.find("open", name) - if err != nil { - return nil, err - } - var n *memFSNode - if dir == nil { - // We're opening the root. - if flag&(os.O_WRONLY|os.O_RDWR) != 0 { - return nil, os.ErrPermission - } - n, frag = &fs.root, "/" - - } else { - n = dir.children[frag] - if flag&(os.O_SYNC|os.O_APPEND) != 0 { - // memFile doesn't support these flags yet. - return nil, os.ErrInvalid - } - if flag&os.O_CREATE != 0 { - if flag&os.O_EXCL != 0 && n != nil { - return nil, os.ErrExist - } - if n == nil { - n = &memFSNode{ - mode: perm.Perm(), - } - dir.children[frag] = n - } - } - if n == nil { - return nil, os.ErrNotExist - } - if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { - n.mu.Lock() - n.data = nil - n.mu.Unlock() - } - } - - children := make([]os.FileInfo, 0, len(n.children)) - for cName, c := range n.children { - children = append(children, c.stat(cName)) - } - return &memFile{ - n: n, - nameSnapshot: frag, - childrenSnapshot: children, - }, nil -} - -func (fs *memFS) RemoveAll(ctx context.Context, name string) error { - fs.mu.Lock() - defer fs.mu.Unlock() - - dir, frag, err := fs.find("remove", name) - if err != nil { - return err - } - if dir == nil { - // We can't remove the root. - return os.ErrInvalid - } - delete(dir.children, frag) - return nil -} - -func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { - fs.mu.Lock() - defer fs.mu.Unlock() - - oldName = slashClean(oldName) - newName = slashClean(newName) - if oldName == newName { - return nil - } - if strings.HasPrefix(newName, oldName+"/") { - // We can't rename oldName to be a sub-directory of itself. - return os.ErrInvalid - } - - oDir, oFrag, err := fs.find("rename", oldName) - if err != nil { - return err - } - if oDir == nil { - // We can't rename from the root. - return os.ErrInvalid - } - - nDir, nFrag, err := fs.find("rename", newName) - if err != nil { - return err - } - if nDir == nil { - // We can't rename to the root. 
- return os.ErrInvalid - } - - oNode, ok := oDir.children[oFrag] - if !ok { - return os.ErrNotExist - } - if oNode.children != nil { - if nNode, ok := nDir.children[nFrag]; ok { - if nNode.children == nil { - return errNotADirectory - } - if len(nNode.children) != 0 { - return errDirectoryNotEmpty - } - } - } - delete(oDir.children, oFrag) - nDir.children[nFrag] = oNode - return nil -} - -func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - - dir, frag, err := fs.find("stat", name) - if err != nil { - return nil, err - } - if dir == nil { - // We're stat'ting the root. - return fs.root.stat("/"), nil - } - if n, ok := dir.children[frag]; ok { - return n.stat(path.Base(name)), nil - } - return nil, os.ErrNotExist -} - -// A memFSNode represents a single entry in the in-memory filesystem and also -// implements os.FileInfo. -type memFSNode struct { - // children is protected by memFS.mu. - children map[string]*memFSNode - - mu sync.Mutex - data []byte - mode os.FileMode - modTime time.Time - deadProps map[xml.Name]Property -} - -func (n *memFSNode) stat(name string) *memFileInfo { - n.mu.Lock() - defer n.mu.Unlock() - return &memFileInfo{ - name: name, - size: int64(len(n.data)), - mode: n.mode, - modTime: n.modTime, - } -} - -func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { - n.mu.Lock() - defer n.mu.Unlock() - if len(n.deadProps) == 0 { - return nil, nil - } - ret := make(map[xml.Name]Property, len(n.deadProps)) - for k, v := range n.deadProps { - ret[k] = v - } - return ret, nil -} - -func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { - n.mu.Lock() - defer n.mu.Unlock() - pstat := Propstat{Status: http.StatusOK} - for _, patch := range patches { - for _, p := range patch.Props { - pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) - if patch.Remove { - delete(n.deadProps, p.XMLName) - continue - } - if n.deadProps == nil { - n.deadProps = map[xml.Name]Property{} - } - n.deadProps[p.XMLName] = p - } - } - return []Propstat{pstat}, nil -} - -type memFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (f *memFileInfo) Name() string { return f.name } -func (f *memFileInfo) Size() int64 { return f.size } -func (f *memFileInfo) Mode() os.FileMode { return f.mode } -func (f *memFileInfo) ModTime() time.Time { return f.modTime } -func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } -func (f *memFileInfo) Sys() interface{} { return nil } - -// A memFile is a File implementation for a memFSNode. It is a per-file (not -// per-node) read/write position, and a snapshot of the memFS' tree structure -// (a node's name and children) for that node. -type memFile struct { - n *memFSNode - nameSnapshot string - childrenSnapshot []os.FileInfo - // pos is protected by n.mu. - pos int -} - -// A *memFile implements the optional DeadPropsHolder interface. 
-var _ DeadPropsHolder = (*memFile)(nil) - -func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } -func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } - -func (f *memFile) Close() error { - return nil -} - -func (f *memFile) Read(p []byte) (int, error) { - f.n.mu.Lock() - defer f.n.mu.Unlock() - if f.n.mode.IsDir() { - return 0, os.ErrInvalid - } - if f.pos >= len(f.n.data) { - return 0, io.EOF - } - n := copy(p, f.n.data[f.pos:]) - f.pos += n - return n, nil -} - -func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { - f.n.mu.Lock() - defer f.n.mu.Unlock() - if !f.n.mode.IsDir() { - return nil, os.ErrInvalid - } - old := f.pos - if old >= len(f.childrenSnapshot) { - // The os.File Readdir docs say that at the end of a directory, - // the error is io.EOF if count > 0 and nil if count <= 0. - if count > 0 { - return nil, io.EOF - } - return nil, nil - } - if count > 0 { - f.pos += count - if f.pos > len(f.childrenSnapshot) { - f.pos = len(f.childrenSnapshot) - } - } else { - f.pos = len(f.childrenSnapshot) - old = 0 - } - return f.childrenSnapshot[old:f.pos], nil -} - -func (f *memFile) Seek(offset int64, whence int) (int64, error) { - f.n.mu.Lock() - defer f.n.mu.Unlock() - npos := f.pos - // TODO: How to handle offsets greater than the size of system int? - switch whence { - case os.SEEK_SET: - npos = int(offset) - case os.SEEK_CUR: - npos += int(offset) - case os.SEEK_END: - npos = len(f.n.data) + int(offset) - default: - npos = -1 - } - if npos < 0 { - return 0, os.ErrInvalid - } - f.pos = npos - return int64(f.pos), nil -} - -func (f *memFile) Stat() (os.FileInfo, error) { - return f.n.stat(f.nameSnapshot), nil -} - -func (f *memFile) Write(p []byte) (int, error) { - lenp := len(p) - f.n.mu.Lock() - defer f.n.mu.Unlock() - - if f.n.mode.IsDir() { - return 0, os.ErrInvalid - } - if f.pos < len(f.n.data) { - n := copy(f.n.data[f.pos:], p) - f.pos += n - p = p[n:] - } else if f.pos > len(f.n.data) { - // Write permits the creation of holes, if we've seek'ed past the - // existing end of file. - if f.pos <= cap(f.n.data) { - oldLen := len(f.n.data) - f.n.data = f.n.data[:f.pos] - hole := f.n.data[oldLen:] - for i := range hole { - hole[i] = 0 - } - } else { - d := make([]byte, f.pos, f.pos+len(p)) - copy(d, f.n.data) - f.n.data = d - } - } - - if len(p) > 0 { - // We should only get here if f.pos == len(f.n.data). - f.n.data = append(f.n.data, p...) - f.pos = len(f.n.data) - } - f.n.modTime = time.Now() - return lenp, nil -} - -// moveFiles moves files and/or directories from src to dst. -// -// See section 9.9.4 for when various HTTP status codes apply. -func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { - created := false - if _, err := fs.Stat(ctx, dst); err != nil { - if !os.IsNotExist(err) { - return http.StatusForbidden, err - } - created = true - } else if overwrite { - // Section 9.9.3 says that "If a resource exists at the destination - // and the Overwrite header is "T", then prior to performing the move, - // the server must perform a DELETE with "Depth: infinity" on the - // destination resource. 
- if err := fs.RemoveAll(ctx, dst); err != nil { - return http.StatusForbidden, err - } - } else { - return http.StatusPreconditionFailed, os.ErrExist - } - if err := fs.Rename(ctx, src, dst); err != nil { - return http.StatusForbidden, err - } - if created { - return http.StatusCreated, nil - } - return http.StatusNoContent, nil -} - -func copyProps(dst, src File) error { - d, ok := dst.(DeadPropsHolder) - if !ok { - return nil - } - s, ok := src.(DeadPropsHolder) - if !ok { - return nil - } - m, err := s.DeadProps() - if err != nil { - return err - } - props := make([]Property, 0, len(m)) - for _, prop := range m { - props = append(props, prop) - } - _, err = d.Patch([]Proppatch{{Props: props}}) - return err -} - -// copyFiles copies files and/or directories from src to dst. -// -// See section 9.8.5 for when various HTTP status codes apply. -func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { - if recursion == 1000 { - return http.StatusInternalServerError, errRecursionTooDeep - } - recursion++ - - // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ - // into /A/B/ could lead to infinite recursion if not handled correctly." - - srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) - if err != nil { - if os.IsNotExist(err) { - return http.StatusNotFound, err - } - return http.StatusInternalServerError, err - } - defer srcFile.Close() - srcStat, err := srcFile.Stat() - if err != nil { - if os.IsNotExist(err) { - return http.StatusNotFound, err - } - return http.StatusInternalServerError, err - } - srcPerm := srcStat.Mode() & os.ModePerm - - created := false - if _, err := fs.Stat(ctx, dst); err != nil { - if os.IsNotExist(err) { - created = true - } else { - return http.StatusForbidden, err - } - } else { - if !overwrite { - return http.StatusPreconditionFailed, os.ErrExist - } - if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { - return http.StatusForbidden, err - } - } - - if srcStat.IsDir() { - if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { - return http.StatusForbidden, err - } - if depth == infiniteDepth { - children, err := srcFile.Readdir(-1) - if err != nil { - return http.StatusForbidden, err - } - for _, c := range children { - name := c.Name() - s := path.Join(src, name) - d := path.Join(dst, name) - cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) - if cErr != nil { - // TODO: MultiStatus. - return cStatus, cErr - } - } - } - - } else { - dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) - if err != nil { - if os.IsNotExist(err) { - return http.StatusConflict, err - } - return http.StatusForbidden, err - - } - _, copyErr := io.Copy(dstFile, srcFile) - propsErr := copyProps(dstFile, srcFile) - closeErr := dstFile.Close() - if copyErr != nil { - return http.StatusInternalServerError, copyErr - } - if propsErr != nil { - return http.StatusInternalServerError, propsErr - } - if closeErr != nil { - return http.StatusInternalServerError, closeErr - } - } - - if created { - return http.StatusCreated, nil - } - return http.StatusNoContent, nil -} - -// walkFS traverses filesystem fs starting at name up to depth levels. -// -// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, -// walkFS calls walkFn. If a visited file system node is a directory and -// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. 
-func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { - // This implementation is based on Walk's code in the standard path/filepath package. - err := walkFn(name, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - if !info.IsDir() || depth == 0 { - return nil - } - if depth == 1 { - depth = 0 - } - - // Read directory names. - f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) - if err != nil { - return walkFn(name, info, err) - } - fileInfos, err := f.Readdir(0) - f.Close() - if err != nil { - return walkFn(name, info, err) - } - - for _, fileInfo := range fileInfos { - filename := path.Join(name, fileInfo.Name()) - fileInfo, err := fs.Stat(ctx, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} diff --git a/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/golang.org/x/net/webdav/file_go1.6.go deleted file mode 100644 index fa387700..00000000 --- a/vendor/golang.org/x/net/webdav/file_go1.6.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package webdav - -import ( - "net/http" - - "golang.org/x/net/context" -) - -func getContext(r *http.Request) context.Context { - return context.Background() -} diff --git a/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/golang.org/x/net/webdav/file_go1.7.go deleted file mode 100644 index d1c3de83..00000000 --- a/vendor/golang.org/x/net/webdav/file_go1.7.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package webdav - -import ( - "context" - "net/http" -) - -func getContext(r *http.Request) context.Context { - return r.Context() -} diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go deleted file mode 100644 index bfd96e19..00000000 --- a/vendor/golang.org/x/net/webdav/file_test.go +++ /dev/null @@ -1,1184 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package webdav - -import ( - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestSlashClean(t *testing.T) { - testCases := []string{ - "", - ".", - "/", - "/./", - "//", - "//.", - "//a", - "/a", - "/a/b/c", - "/a//b/./../c/d/", - "a", - "a/b/c", - } - for _, tc := range testCases { - got := slashClean(tc) - want := path.Clean("/" + tc) - if got != want { - t.Errorf("tc=%q: got %q, want %q", tc, got, want) - } - } -} - -func TestDirResolve(t *testing.T) { - testCases := []struct { - dir, name, want string - }{ - {"/", "", "/"}, - {"/", "/", "/"}, - {"/", ".", "/"}, - {"/", "./a", "/a"}, - {"/", "..", "/"}, - {"/", "..", "/"}, - {"/", "../", "/"}, - {"/", "../.", "/"}, - {"/", "../a", "/a"}, - {"/", "../..", "/"}, - {"/", "../bar/a", "/bar/a"}, - {"/", "../baz/a", "/baz/a"}, - {"/", "...", "/..."}, - {"/", ".../a", "/.../a"}, - {"/", ".../..", "/"}, - {"/", "a", "/a"}, - {"/", "a/./b", "/a/b"}, - {"/", "a/../../b", "/b"}, - {"/", "a/../b", "/b"}, - {"/", "a/b", "/a/b"}, - {"/", "a/b/c/../../d", "/a/d"}, - {"/", "a/b/c/../../../d", "/d"}, - {"/", "a/b/c/../../../../d", "/d"}, - {"/", "a/b/c/d", "/a/b/c/d"}, - - {"/foo/bar", "", "/foo/bar"}, - {"/foo/bar", "/", "/foo/bar"}, - {"/foo/bar", ".", "/foo/bar"}, - {"/foo/bar", "./a", "/foo/bar/a"}, - {"/foo/bar", "..", "/foo/bar"}, - {"/foo/bar", "../", "/foo/bar"}, - {"/foo/bar", "../.", "/foo/bar"}, - {"/foo/bar", "../a", "/foo/bar/a"}, - {"/foo/bar", "../..", "/foo/bar"}, - {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, - {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, - {"/foo/bar", "...", "/foo/bar/..."}, - {"/foo/bar", ".../a", "/foo/bar/.../a"}, - {"/foo/bar", ".../..", "/foo/bar"}, - {"/foo/bar", "a", "/foo/bar/a"}, - {"/foo/bar", "a/./b", "/foo/bar/a/b"}, - {"/foo/bar", "a/../../b", "/foo/bar/b"}, - {"/foo/bar", "a/../b", "/foo/bar/b"}, - {"/foo/bar", "a/b", "/foo/bar/a/b"}, - {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"}, - {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, - {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"}, - {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, - - {"/foo/bar/", "", "/foo/bar"}, - {"/foo/bar/", "/", "/foo/bar"}, - {"/foo/bar/", ".", "/foo/bar"}, - {"/foo/bar/", "./a", "/foo/bar/a"}, - {"/foo/bar/", "..", "/foo/bar"}, - - {"/foo//bar///", "", "/foo/bar"}, - {"/foo//bar///", "/", "/foo/bar"}, - {"/foo//bar///", ".", "/foo/bar"}, - {"/foo//bar///", "./a", "/foo/bar/a"}, - {"/foo//bar///", "..", "/foo/bar"}, - - {"/x/y/z", "ab/c\x00d/ef", ""}, - - {".", "", "."}, - {".", "/", "."}, - {".", ".", "."}, - {".", "./a", "a"}, - {".", "..", "."}, - {".", "..", "."}, - {".", "../", "."}, - {".", "../.", "."}, - {".", "../a", "a"}, - {".", "../..", "."}, - {".", "../bar/a", "bar/a"}, - {".", "../baz/a", "baz/a"}, - {".", "...", "..."}, - {".", ".../a", ".../a"}, - {".", ".../..", "."}, - {".", "a", "a"}, - {".", "a/./b", "a/b"}, - {".", "a/../../b", "b"}, - {".", "a/../b", "b"}, - {".", "a/b", "a/b"}, - {".", "a/b/c/../../d", "a/d"}, - {".", "a/b/c/../../../d", "d"}, - {".", "a/b/c/../../../../d", "d"}, - {".", "a/b/c/d", "a/b/c/d"}, - - {"", "", "."}, - {"", "/", "."}, - {"", ".", "."}, - {"", "./a", "a"}, - {"", "..", "."}, - } - - for _, tc := range testCases { - d := Dir(filepath.FromSlash(tc.dir)) - if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { - t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) - } - } -} - -func TestWalk(t *testing.T) { - 
type walkStep struct { - name, frag string - final bool - } - - testCases := []struct { - dir string - want []walkStep - }{ - {"", []walkStep{ - {"", "", true}, - }}, - {"/", []walkStep{ - {"", "", true}, - }}, - {"/a", []walkStep{ - {"", "a", true}, - }}, - {"/a/", []walkStep{ - {"", "a", true}, - }}, - {"/a/b", []walkStep{ - {"", "a", false}, - {"a", "b", true}, - }}, - {"/a/b/", []walkStep{ - {"", "a", false}, - {"a", "b", true}, - }}, - {"/a/b/c", []walkStep{ - {"", "a", false}, - {"a", "b", false}, - {"b", "c", true}, - }}, - // The following test case is the one mentioned explicitly - // in the method description. - {"/foo/bar/x", []walkStep{ - {"", "foo", false}, - {"foo", "bar", false}, - {"bar", "x", true}, - }}, - } - - ctx := context.Background() - - for _, tc := range testCases { - fs := NewMemFS().(*memFS) - - parts := strings.Split(tc.dir, "/") - for p := 2; p < len(parts); p++ { - d := strings.Join(parts[:p], "/") - if err := fs.Mkdir(ctx, d, 0666); err != nil { - t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) - } - } - - i, prevFrag := 0, "" - err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { - got := walkStep{ - name: prevFrag, - frag: frag, - final: final, - } - want := tc.want[i] - - if got != want { - return fmt.Errorf("got %+v, want %+v", got, want) - } - i, prevFrag = i+1, frag - return nil - }) - if err != nil { - t.Errorf("tc.dir=%q: %v", tc.dir, err) - } - } -} - -// find appends to ss the names of the named file and its children. It is -// analogous to the Unix find command. -// -// The returned strings are not guaranteed to be in any particular order. -func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) { - stat, err := fs.Stat(ctx, name) - if err != nil { - return nil, err - } - ss = append(ss, name) - if stat.IsDir() { - f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) - if err != nil { - return nil, err - } - defer f.Close() - children, err := f.Readdir(-1) - if err != nil { - return nil, err - } - for _, c := range children { - ss, err = find(ctx, ss, fs, path.Join(name, c.Name())) - if err != nil { - return nil, err - } - } - } - return ss, nil -} - -func testFS(t *testing.T, fs FileSystem) { - errStr := func(err error) string { - switch { - case os.IsExist(err): - return "errExist" - case os.IsNotExist(err): - return "errNotExist" - case err != nil: - return "err" - } - return "ok" - } - - // The non-"find" non-"stat" test cases should change the file system state. The - // indentation of the "find"s and "stat"s helps distinguish such test cases. 
- testCases := []string{ - " stat / want dir", - " stat /a want errNotExist", - " stat /d want errNotExist", - " stat /d/e want errNotExist", - "create /a A want ok", - " stat /a want 1", - "create /d/e EEE want errNotExist", - "mk-dir /a want errExist", - "mk-dir /d/m want errNotExist", - "mk-dir /d want ok", - " stat /d want dir", - "create /d/e EEE want ok", - " stat /d/e want 3", - " find / /a /d /d/e", - "create /d/f FFFF want ok", - "create /d/g GGGGGGG want ok", - "mk-dir /d/m want ok", - "mk-dir /d/m want errExist", - "create /d/m/p PPPPP want ok", - " stat /d/e want 3", - " stat /d/f want 4", - " stat /d/g want 7", - " stat /d/h want errNotExist", - " stat /d/m want dir", - " stat /d/m/p want 5", - " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", - "rm-all /d want ok", - " stat /a want 1", - " stat /d want errNotExist", - " stat /d/e want errNotExist", - " stat /d/f want errNotExist", - " stat /d/g want errNotExist", - " stat /d/m want errNotExist", - " stat /d/m/p want errNotExist", - " find / /a", - "mk-dir /d/m want errNotExist", - "mk-dir /d want ok", - "create /d/f FFFF want ok", - "rm-all /d/f want ok", - "mk-dir /d/m want ok", - "rm-all /z want ok", - "rm-all / want err", - "create /b BB want ok", - " stat / want dir", - " stat /a want 1", - " stat /b want 2", - " stat /c want errNotExist", - " stat /d want dir", - " stat /d/m want dir", - " find / /a /b /d /d/m", - "move__ o=F /b /c want ok", - " stat /b want errNotExist", - " stat /c want 2", - " stat /d/m want dir", - " stat /d/n want errNotExist", - " find / /a /c /d /d/m", - "move__ o=F /d/m /d/n want ok", - "create /d/n/q QQQQ want ok", - " stat /d/m want errNotExist", - " stat /d/n want dir", - " stat /d/n/q want 4", - "move__ o=F /d /d/n/z want err", - "move__ o=T /c /d/n/q want ok", - " stat /c want errNotExist", - " stat /d/n/q want 2", - " find / /a /d /d/n /d/n/q", - "create /d/n/r RRRRR want ok", - "mk-dir /u want ok", - "mk-dir /u/v want ok", - "move__ o=F /d/n /u want errExist", - "create /t TTTTTT want ok", - "move__ o=F /d/n /t want errExist", - "rm-all /t want ok", - "move__ o=F /d/n /t want ok", - " stat /d want dir", - " stat /d/n want errNotExist", - " stat /d/n/r want errNotExist", - " stat /t want dir", - " stat /t/q want 2", - " stat /t/r want 5", - " find / /a /d /t /t/q /t/r /u /u/v", - "move__ o=F /t / want errExist", - "move__ o=T /t /u/v want ok", - " stat /u/v/r want 5", - "move__ o=F / /z want err", - " find / /a /d /u /u/v /u/v/q /u/v/r", - " stat /a want 1", - " stat /b want errNotExist", - " stat /c want errNotExist", - " stat /u/v/r want 5", - "copy__ o=F d=0 /a /b want ok", - "copy__ o=T d=0 /a /c want ok", - " stat /a want 1", - " stat /b want 1", - " stat /c want 1", - " stat /u/v/r want 5", - "copy__ o=F d=0 /u/v/r /b want errExist", - " stat /b want 1", - "copy__ o=T d=0 /u/v/r /b want ok", - " stat /a want 1", - " stat /b want 5", - " stat /u/v/r want 5", - "rm-all /a want ok", - "rm-all /b want ok", - "mk-dir /u/v/w want ok", - "create /u/v/w/s SSSSSSSS want ok", - " stat /d want dir", - " stat /d/x want errNotExist", - " stat /d/y want errNotExist", - " stat /u/v/r want 5", - " stat /u/v/w/s want 8", - " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", - "copy__ o=T d=0 /u/v /d/x want ok", - "copy__ o=T d=∞ /u/v /d/y want ok", - "rm-all /u want ok", - " stat /d/x want dir", - " stat /d/x/q want errNotExist", - " stat /d/x/r want errNotExist", - " stat /d/x/w want errNotExist", - " stat /d/x/w/s want errNotExist", - " stat /d/y want dir", - " stat /d/y/q want 2", - " stat /d/y/r want 
5", - " stat /d/y/w want dir", - " stat /d/y/w/s want 8", - " stat /u want errNotExist", - " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", - "copy__ o=F d=∞ /d/y /d/x want errExist", - } - - ctx := context.Background() - - for i, tc := range testCases { - tc = strings.TrimSpace(tc) - j := strings.IndexByte(tc, ' ') - if j < 0 { - t.Fatalf("test case #%d %q: invalid command", i, tc) - } - op, arg := tc[:j], tc[j+1:] - - switch op { - default: - t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) - - case "create": - parts := strings.Split(arg, " ") - if len(parts) != 4 || parts[2] != "want" { - t.Fatalf("test case #%d %q: invalid write", i, tc) - } - f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if got := errStr(opErr); got != parts[3] { - t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) - } - if f != nil { - if _, err := f.Write([]byte(parts[1])); err != nil { - t.Fatalf("test case #%d %q: Write: %v", i, tc, err) - } - if err := f.Close(); err != nil { - t.Fatalf("test case #%d %q: Close: %v", i, tc, err) - } - } - - case "find": - got, err := find(ctx, nil, fs, "/") - if err != nil { - t.Fatalf("test case #%d %q: find: %v", i, tc, err) - } - sort.Strings(got) - want := strings.Split(arg, " ") - if !reflect.DeepEqual(got, want) { - t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) - } - - case "copy__", "mk-dir", "move__", "rm-all", "stat": - nParts := 3 - switch op { - case "copy__": - nParts = 6 - case "move__": - nParts = 5 - } - parts := strings.Split(arg, " ") - if len(parts) != nParts { - t.Fatalf("test case #%d %q: invalid %s", i, tc, op) - } - - got, opErr := "", error(nil) - switch op { - case "copy__": - depth := 0 - if parts[1] == "d=∞" { - depth = infiniteDepth - } - _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) - case "mk-dir": - opErr = fs.Mkdir(ctx, parts[0], 0777) - case "move__": - _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T") - case "rm-all": - opErr = fs.RemoveAll(ctx, parts[0]) - case "stat": - var stat os.FileInfo - fileName := parts[0] - if stat, opErr = fs.Stat(ctx, fileName); opErr == nil { - if stat.IsDir() { - got = "dir" - } else { - got = strconv.Itoa(int(stat.Size())) - } - - if fileName == "/" { - // For a Dir FileSystem, the virtual file system root maps to a - // real file system name like "/tmp/webdav-test012345", which does - // not end with "/". We skip such cases. 
- } else if statName := stat.Name(); path.Base(fileName) != statName { - t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", - i, tc, fileName, statName) - } - } - } - if got == "" { - got = errStr(opErr) - } - - if parts[len(parts)-2] != "want" { - t.Fatalf("test case #%d %q: invalid %s", i, tc, op) - } - if want := parts[len(parts)-1]; got != want { - t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) - } - } - } -} - -func TestDir(t *testing.T) { - switch runtime.GOOS { - case "nacl": - t.Skip("see golang.org/issue/12004") - case "plan9": - t.Skip("see golang.org/issue/11453") - } - - td, err := ioutil.TempDir("", "webdav-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - testFS(t, Dir(td)) -} - -func TestMemFS(t *testing.T) { - testFS(t, NewMemFS()) -} - -func TestMemFSRoot(t *testing.T) { - ctx := context.Background() - fs := NewMemFS() - for i := 0; i < 5; i++ { - stat, err := fs.Stat(ctx, "/") - if err != nil { - t.Fatalf("i=%d: Stat: %v", i, err) - } - if !stat.IsDir() { - t.Fatalf("i=%d: Stat.IsDir is false, want true", i) - } - - f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0) - if err != nil { - t.Fatalf("i=%d: OpenFile: %v", i, err) - } - defer f.Close() - children, err := f.Readdir(-1) - if err != nil { - t.Fatalf("i=%d: Readdir: %v", i, err) - } - if len(children) != i { - t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) - } - - if _, err := f.Write(make([]byte, 1)); err == nil { - t.Fatalf("i=%d: Write: got nil error, want non-nil", i) - } - - if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil { - t.Fatalf("i=%d: Mkdir: %v", i, err) - } - } -} - -func TestMemFileReaddir(t *testing.T) { - ctx := context.Background() - fs := NewMemFS() - if err := fs.Mkdir(ctx, "/foo", 0777); err != nil { - t.Fatalf("Mkdir: %v", err) - } - readdir := func(count int) ([]os.FileInfo, error) { - f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0) - if err != nil { - t.Fatalf("OpenFile: %v", err) - } - defer f.Close() - return f.Readdir(count) - } - if got, err := readdir(-1); len(got) != 0 || err != nil { - t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, ", len(got), err) - } - if got, err := readdir(+1); len(got) != 0 || err != io.EOF { - t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) - } -} - -func TestMemFile(t *testing.T) { - testCases := []string{ - "wantData ", - "wantSize 0", - "write abc", - "wantData abc", - "write de", - "wantData abcde", - "wantSize 5", - "write 5*x", - "write 4*y+2*z", - "write 3*st", - "wantData abcdexxxxxyyyyzzststst", - "wantSize 22", - "seek set 4 want 4", - "write EFG", - "wantData abcdEFGxxxyyyyzzststst", - "wantSize 22", - "seek set 2 want 2", - "read cdEF", - "read Gx", - "seek cur 0 want 8", - "seek cur 2 want 10", - "seek cur -1 want 9", - "write J", - "wantData abcdEFGxxJyyyyzzststst", - "wantSize 22", - "seek cur -4 want 6", - "write ghijk", - "wantData abcdEFghijkyyyzzststst", - "wantSize 22", - "read yyyz", - "seek cur 0 want 15", - "write ", - "seek cur 0 want 15", - "read ", - "seek cur 0 want 15", - "seek end -3 want 19", - "write ZZ", - "wantData abcdEFghijkyyyzzstsZZt", - "wantSize 22", - "write 4*A", - "wantData abcdEFghijkyyyzzstsZZAAAA", - "wantSize 25", - "seek end 0 want 25", - "seek end -5 want 20", - "read Z+4*A", - "write 5*B", - "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", - "wantSize 30", - "seek end 10 want 40", - "write C", - "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", - "wantSize 41", 
- "write D", - "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", - "wantSize 42", - "seek set 43 want 43", - "write E", - "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", - "wantSize 44", - "seek set 0 want 0", - "write 5*123456789_", - "wantData 123456789_123456789_123456789_123456789_123456789_", - "wantSize 50", - "seek cur 0 want 50", - "seek cur -99 want err", - } - - ctx := context.Background() - - const filename = "/foo" - fs := NewMemFS() - f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - t.Fatalf("OpenFile: %v", err) - } - defer f.Close() - - for i, tc := range testCases { - j := strings.IndexByte(tc, ' ') - if j < 0 { - t.Fatalf("test case #%d %q: invalid command", i, tc) - } - op, arg := tc[:j], tc[j+1:] - - // Expand an arg like "3*a+2*b" to "aaabb". - parts := strings.Split(arg, "+") - for j, part := range parts { - if k := strings.IndexByte(part, '*'); k >= 0 { - repeatCount, repeatStr := part[:k], part[k+1:] - n, err := strconv.Atoi(repeatCount) - if err != nil { - t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) - } - parts[j] = strings.Repeat(repeatStr, n) - } - } - arg = strings.Join(parts, "") - - switch op { - default: - t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) - - case "read": - buf := make([]byte, len(arg)) - if _, err := io.ReadFull(f, buf); err != nil { - t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) - } - if got := string(buf); got != arg { - t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) - } - - case "seek": - parts := strings.Split(arg, " ") - if len(parts) != 4 { - t.Fatalf("test case #%d %q: invalid seek", i, tc) - } - - whence := 0 - switch parts[0] { - default: - t.Fatalf("test case #%d %q: invalid seek whence", i, tc) - case "set": - whence = os.SEEK_SET - case "cur": - whence = os.SEEK_CUR - case "end": - whence = os.SEEK_END - } - offset, err := strconv.Atoi(parts[1]) - if err != nil { - t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) - } - - if parts[2] != "want" { - t.Fatalf("test case #%d %q: invalid seek", i, tc) - } - if parts[3] == "err" { - _, err := f.Seek(int64(offset), whence) - if err == nil { - t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) - } - } else { - got, err := f.Seek(int64(offset), whence) - if err != nil { - t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) - } - want, err := strconv.Atoi(parts[3]) - if err != nil { - t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) - } - if got != int64(want) { - t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) - } - } - - case "write": - n, err := f.Write([]byte(arg)) - if err != nil { - t.Fatalf("test case #%d %q: write: %v", i, tc, err) - } - if n != len(arg) { - t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) - } - - case "wantData": - g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666) - if err != nil { - t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) - } - gotBytes, err := ioutil.ReadAll(g) - if err != nil { - t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) - } - for i, c := range gotBytes { - if c == '\x00' { - gotBytes[i] = '.' 
- } - } - got := string(gotBytes) - if got != arg { - t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) - } - if err := g.Close(); err != nil { - t.Fatalf("test case #%d %q: Close: %v", i, tc, err) - } - - case "wantSize": - n, err := strconv.Atoi(arg) - if err != nil { - t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) - } - fi, err := fs.Stat(ctx, filename) - if err != nil { - t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) - } - if got, want := fi.Size(), int64(n); got != want { - t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) - } - } - } -} - -// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a -// memFile doesn't allocate a new buffer for each of those N times. Otherwise, -// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. -func TestMemFileWriteAllocs(t *testing.T) { - if runtime.Compiler == "gccgo" { - t.Skip("gccgo allocates here") - } - ctx := context.Background() - fs := NewMemFS() - f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - t.Fatalf("OpenFile: %v", err) - } - defer f.Close() - - xxx := make([]byte, 1024) - for i := range xxx { - xxx[i] = 'x' - } - - a := testing.AllocsPerRun(100, func() { - f.Write(xxx) - }) - // AllocsPerRun returns an integral value, so we compare the rounded-down - // number to zero. - if a > 0 { - t.Fatalf("%v allocs per run, want 0", a) - } -} - -func BenchmarkMemFileWrite(b *testing.B) { - ctx := context.Background() - fs := NewMemFS() - xxx := make([]byte, 1024) - for i := range xxx { - xxx[i] = 'x' - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - b.Fatalf("OpenFile: %v", err) - } - for j := 0; j < 100; j++ { - f.Write(xxx) - } - if err := f.Close(); err != nil { - b.Fatalf("Close: %v", err) - } - if err := fs.RemoveAll(ctx, "/xxx"); err != nil { - b.Fatalf("RemoveAll: %v", err) - } - } -} - -func TestCopyMoveProps(t *testing.T) { - ctx := context.Background() - fs := NewMemFS() - create := func(name string) error { - f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - return err - } - _, wErr := f.Write([]byte("contents")) - cErr := f.Close() - if wErr != nil { - return wErr - } - return cErr - } - patch := func(name string, patches ...Proppatch) error { - f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) - if err != nil { - return err - } - _, pErr := f.(DeadPropsHolder).Patch(patches) - cErr := f.Close() - if pErr != nil { - return pErr - } - return cErr - } - props := func(name string) (map[xml.Name]Property, error) { - f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) - if err != nil { - return nil, err - } - m, pErr := f.(DeadPropsHolder).DeadProps() - cErr := f.Close() - if pErr != nil { - return nil, pErr - } - if cErr != nil { - return nil, cErr - } - return m, nil - } - - p0 := Property{ - XMLName: xml.Name{Space: "x:", Local: "boat"}, - InnerXML: []byte("pea-green"), - } - p1 := Property{ - XMLName: xml.Name{Space: "x:", Local: "ring"}, - InnerXML: []byte("1 shilling"), - } - p2 := Property{ - XMLName: xml.Name{Space: "x:", Local: "spoon"}, - InnerXML: []byte("runcible"), - } - p3 := Property{ - XMLName: xml.Name{Space: "x:", Local: "moon"}, - InnerXML: []byte("light"), - } - - if err := create("/src"); err != nil { - t.Fatalf("create /src: %v", err) - } - if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { - t.Fatalf("patch /src +p0 +p1: %v", 
err) - } - if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { - t.Fatalf("copyFiles /src /tmp: %v", err) - } - if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil { - t.Fatalf("moveFiles /tmp /dst: %v", err) - } - if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { - t.Fatalf("patch /src -p0: %v", err) - } - if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { - t.Fatalf("patch /src +p2: %v", err) - } - if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { - t.Fatalf("patch /dst -p1: %v", err) - } - if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { - t.Fatalf("patch /dst +p3: %v", err) - } - - gotSrc, err := props("/src") - if err != nil { - t.Fatalf("props /src: %v", err) - } - wantSrc := map[xml.Name]Property{ - p1.XMLName: p1, - p2.XMLName: p2, - } - if !reflect.DeepEqual(gotSrc, wantSrc) { - t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) - } - - gotDst, err := props("/dst") - if err != nil { - t.Fatalf("props /dst: %v", err) - } - wantDst := map[xml.Name]Property{ - p0.XMLName: p0, - p3.XMLName: p3, - } - if !reflect.DeepEqual(gotDst, wantDst) { - t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) - } -} - -func TestWalkFS(t *testing.T) { - testCases := []struct { - desc string - buildfs []string - startAt string - depth int - walkFn filepath.WalkFunc - want []string - }{{ - "just root", - []string{}, - "/", - infiniteDepth, - nil, - []string{ - "/", - }, - }, { - "infinite walk from root", - []string{ - "mkdir /a", - "mkdir /a/b", - "touch /a/b/c", - "mkdir /a/d", - "mkdir /e", - "touch /f", - }, - "/", - infiniteDepth, - nil, - []string{ - "/", - "/a", - "/a/b", - "/a/b/c", - "/a/d", - "/e", - "/f", - }, - }, { - "infinite walk from subdir", - []string{ - "mkdir /a", - "mkdir /a/b", - "touch /a/b/c", - "mkdir /a/d", - "mkdir /e", - "touch /f", - }, - "/a", - infiniteDepth, - nil, - []string{ - "/a", - "/a/b", - "/a/b/c", - "/a/d", - }, - }, { - "depth 1 walk from root", - []string{ - "mkdir /a", - "mkdir /a/b", - "touch /a/b/c", - "mkdir /a/d", - "mkdir /e", - "touch /f", - }, - "/", - 1, - nil, - []string{ - "/", - "/a", - "/e", - "/f", - }, - }, { - "depth 1 walk from subdir", - []string{ - "mkdir /a", - "mkdir /a/b", - "touch /a/b/c", - "mkdir /a/b/g", - "mkdir /a/b/g/h", - "touch /a/b/g/i", - "touch /a/b/g/h/j", - }, - "/a/b", - 1, - nil, - []string{ - "/a/b", - "/a/b/c", - "/a/b/g", - }, - }, { - "depth 0 walk from subdir", - []string{ - "mkdir /a", - "mkdir /a/b", - "touch /a/b/c", - "mkdir /a/b/g", - "mkdir /a/b/g/h", - "touch /a/b/g/i", - "touch /a/b/g/h/j", - }, - "/a/b", - 0, - nil, - []string{ - "/a/b", - }, - }, { - "infinite walk from file", - []string{ - "mkdir /a", - "touch /a/b", - "touch /a/c", - }, - "/a/b", - 0, - nil, - []string{ - "/a/b", - }, - }, { - "infinite walk with skipped subdir", - []string{ - "mkdir /a", - "mkdir /a/b", - "touch /a/b/c", - "mkdir /a/b/g", - "mkdir /a/b/g/h", - "touch /a/b/g/i", - "touch /a/b/g/h/j", - "touch /a/b/z", - }, - "/", - infiniteDepth, - func(path string, info os.FileInfo, err error) error { - if path == "/a/b/g" { - return filepath.SkipDir - } - return nil - }, - []string{ - "/", - "/a", - "/a/b", - "/a/b/c", - "/a/b/z", - }, - }} - ctx := context.Background() - for _, tc := range testCases { - fs, err := buildTestFS(tc.buildfs) - if err != nil { - t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) - } - var got []string - traceFn := 
func(path string, info os.FileInfo, err error) error { - if tc.walkFn != nil { - err = tc.walkFn(path, info, err) - if err != nil { - return err - } - } - got = append(got, path) - return nil - } - fi, err := fs.Stat(ctx, tc.startAt) - if err != nil { - t.Fatalf("%s: cannot stat: %v", tc.desc, err) - } - err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn) - if err != nil { - t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) - continue - } - sort.Strings(got) - sort.Strings(tc.want) - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) - continue - } - } -} - -func buildTestFS(buildfs []string) (FileSystem, error) { - // TODO: Could this be merged with the build logic in TestFS? - - ctx := context.Background() - fs := NewMemFS() - for _, b := range buildfs { - op := strings.Split(b, " ") - switch op[0] { - case "mkdir": - err := fs.Mkdir(ctx, op[1], os.ModeDir|0777) - if err != nil { - return nil, err - } - case "touch": - f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666) - if err != nil { - return nil, err - } - f.Close() - case "write": - f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - return nil, err - } - _, err = f.Write([]byte(op[2])) - f.Close() - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unknown file operation %q", op[0]) - } - } - return fs, nil -} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go deleted file mode 100644 index 416e81cd..00000000 --- a/vendor/golang.org/x/net/webdav/if.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webdav - -// The If header is covered by Section 10.4. -// http://www.webdav.org/specs/rfc4918.html#HEADER_If - -import ( - "strings" -) - -// ifHeader is a disjunction (OR) of ifLists. -type ifHeader struct { - lists []ifList -} - -// ifList is a conjunction (AND) of Conditions, and an optional resource tag. -type ifList struct { - resourceTag string - conditions []Condition -} - -// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string -// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is -// returned by req.Header.Get("If") for a http.Request req. 
-func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { - s := strings.TrimSpace(httpHeader) - switch tokenType, _, _ := lex(s); tokenType { - case '(': - return parseNoTagLists(s) - case angleTokenType: - return parseTaggedLists(s) - default: - return ifHeader{}, false - } -} - -func parseNoTagLists(s string) (h ifHeader, ok bool) { - for { - l, remaining, ok := parseList(s) - if !ok { - return ifHeader{}, false - } - h.lists = append(h.lists, l) - if remaining == "" { - return h, true - } - s = remaining - } -} - -func parseTaggedLists(s string) (h ifHeader, ok bool) { - resourceTag, n := "", 0 - for first := true; ; first = false { - tokenType, tokenStr, remaining := lex(s) - switch tokenType { - case angleTokenType: - if !first && n == 0 { - return ifHeader{}, false - } - resourceTag, n = tokenStr, 0 - s = remaining - case '(': - n++ - l, remaining, ok := parseList(s) - if !ok { - return ifHeader{}, false - } - l.resourceTag = resourceTag - h.lists = append(h.lists, l) - if remaining == "" { - return h, true - } - s = remaining - default: - return ifHeader{}, false - } - } -} - -func parseList(s string) (l ifList, remaining string, ok bool) { - tokenType, _, s := lex(s) - if tokenType != '(' { - return ifList{}, "", false - } - for { - tokenType, _, remaining = lex(s) - if tokenType == ')' { - if len(l.conditions) == 0 { - return ifList{}, "", false - } - return l, remaining, true - } - c, remaining, ok := parseCondition(s) - if !ok { - return ifList{}, "", false - } - l.conditions = append(l.conditions, c) - s = remaining - } -} - -func parseCondition(s string) (c Condition, remaining string, ok bool) { - tokenType, tokenStr, s := lex(s) - if tokenType == notTokenType { - c.Not = true - tokenType, tokenStr, s = lex(s) - } - switch tokenType { - case strTokenType, angleTokenType: - c.Token = tokenStr - case squareTokenType: - c.ETag = tokenStr - default: - return Condition{}, "", false - } - return c, s, true -} - -// Single-rune tokens like '(' or ')' have a token type equal to their rune. -// All other tokens have a negative token type. -const ( - errTokenType = rune(-1) - eofTokenType = rune(-2) - strTokenType = rune(-3) - notTokenType = rune(-4) - angleTokenType = rune(-5) - squareTokenType = rune(-6) -) - -func lex(s string) (tokenType rune, tokenStr string, remaining string) { - // The net/textproto Reader that parses the HTTP header will collapse - // Linear White Space that spans multiple "\r\n" lines to a single " ", - // so we don't need to look for '\r' or '\n'. - for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { - s = s[1:] - } - if len(s) == 0 { - return eofTokenType, "", "" - } - i := 0 -loop: - for ; i < len(s); i++ { - switch s[i] { - case '\t', ' ', '(', ')', '<', '>', '[', ']': - break loop - } - } - - if i != 0 { - tokenStr, remaining = s[:i], s[i:] - if tokenStr == "Not" { - return notTokenType, "", remaining - } - return strTokenType, tokenStr, remaining - } - - j := 0 - switch s[0] { - case '<': - j, tokenType = strings.IndexByte(s, '>'), angleTokenType - case '[': - j, tokenType = strings.IndexByte(s, ']'), squareTokenType - default: - return rune(s[0]), "", s[1:] - } - if j < 0 { - return errTokenType, "", "" - } - return tokenType, s[1:j], s[j+1:] -} diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go deleted file mode 100644 index aad61a40..00000000 --- a/vendor/golang.org/x/net/webdav/if_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webdav - -import ( - "reflect" - "strings" - "testing" -) - -func TestParseIfHeader(t *testing.T) { - // The "section x.y.z" test cases come from section x.y.z of the spec at - // http://www.webdav.org/specs/rfc4918.html - testCases := []struct { - desc string - input string - want ifHeader - }{{ - "bad: empty", - ``, - ifHeader{}, - }, { - "bad: no parens", - `foobar`, - ifHeader{}, - }, { - "bad: empty list #1", - `()`, - ifHeader{}, - }, { - "bad: empty list #2", - `(a) (b c) () (d)`, - ifHeader{}, - }, { - "bad: no list after resource #1", - ``, - ifHeader{}, - }, { - "bad: no list after resource #2", - ` (a)`, - ifHeader{}, - }, { - "bad: no list after resource #3", - ` (a) (b) `, - ifHeader{}, - }, { - "bad: no-tag-list followed by tagged-list", - `(a) (b) (c)`, - ifHeader{}, - }, { - "bad: unfinished list", - `(a`, - ifHeader{}, - }, { - "bad: unfinished ETag", - `([b`, - ifHeader{}, - }, { - "bad: unfinished Notted list", - `(Not a`, - ifHeader{}, - }, { - "bad: double Not", - `(Not Not a)`, - ifHeader{}, - }, { - "good: one list with a Token", - `(a)`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Token: `a`, - }}, - }}, - }, - }, { - "good: one list with an ETag", - `([a])`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - ETag: `a`, - }}, - }}, - }, - }, { - "good: one list with three Nots", - `(Not a Not b Not [d])`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Not: true, - Token: `a`, - }, { - Not: true, - Token: `b`, - }, { - Not: true, - ETag: `d`, - }}, - }}, - }, - }, { - "good: two lists", - `(a) (b)`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Token: `a`, - }}, - }, { - conditions: []Condition{{ - Token: `b`, - }}, - }}, - }, - }, { - "good: two Notted lists", - `(Not a) (Not b)`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Not: true, - Token: `a`, - }}, - }, { - conditions: []Condition{{ - Not: true, - Token: `b`, - }}, - }}, - }, - }, { - "section 7.5.1", - ` - ()`, - ifHeader{ - lists: []ifList{{ - resourceTag: `http://www.example.com/users/f/fielding/index.html`, - conditions: []Condition{{ - Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, - }}, - }}, - }, - }, { - "section 7.5.2 #1", - `()`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, - }}, - }}, - }, - }, { - "section 7.5.2 #2", - ` - ()`, - ifHeader{ - lists: []ifList{{ - resourceTag: `http://example.com/locked/`, - conditions: []Condition{{ - Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, - }}, - }}, - }, - }, { - "section 7.5.2 #3", - ` - ()`, - ifHeader{ - lists: []ifList{{ - resourceTag: `http://example.com/locked/member`, - conditions: []Condition{{ - Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, - }}, - }}, - }, - }, { - "section 9.9.6", - `() - ()`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, - }}, - }, { - conditions: []Condition{{ - Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, - }}, - }}, - }, - }, { - "section 9.10.8", - `()`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, - }}, - }}, - }, - }, { - "section 10.4.6", - `( - ["I am an ETag"]) - (["I am another ETag"])`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Token: 
`urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, - }, { - ETag: `"I am an ETag"`, - }}, - }, { - conditions: []Condition{{ - ETag: `"I am another ETag"`, - }}, - }}, - }, - }, { - "section 10.4.7", - `(Not - )`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Not: true, - Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, - }, { - Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, - }}, - }}, - }, - }, { - "section 10.4.8", - `() - (Not )`, - ifHeader{ - lists: []ifList{{ - conditions: []Condition{{ - Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, - }}, - }, { - conditions: []Condition{{ - Not: true, - Token: `DAV:no-lock`, - }}, - }}, - }, - }, { - "section 10.4.9", - ` - ( - [W/"A weak ETag"]) (["strong ETag"])`, - ifHeader{ - lists: []ifList{{ - resourceTag: `/resource1`, - conditions: []Condition{{ - Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, - }, { - ETag: `W/"A weak ETag"`, - }}, - }, { - resourceTag: `/resource1`, - conditions: []Condition{{ - ETag: `"strong ETag"`, - }}, - }}, - }, - }, { - "section 10.4.10", - ` - ()`, - ifHeader{ - lists: []ifList{{ - resourceTag: `http://www.example.com/specs/`, - conditions: []Condition{{ - Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, - }}, - }}, - }, - }, { - "section 10.4.11 #1", - ` (["4217"])`, - ifHeader{ - lists: []ifList{{ - resourceTag: `/specs/rfc2518.doc`, - conditions: []Condition{{ - ETag: `"4217"`, - }}, - }}, - }, - }, { - "section 10.4.11 #2", - ` (Not ["4217"])`, - ifHeader{ - lists: []ifList{{ - resourceTag: `/specs/rfc2518.doc`, - conditions: []Condition{{ - Not: true, - ETag: `"4217"`, - }}, - }}, - }, - }} - - for _, tc := range testCases { - got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1)) - if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { - t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) - continue - } - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) - continue - } - } -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README deleted file mode 100644 index 89656f48..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/README +++ /dev/null @@ -1,11 +0,0 @@ -This is a fork of the encoding/xml package at ca1d6c4, the last commit before -https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name -space behavior" made late in the lead-up to the Go 1.5 release. - -The list of encoding/xml changes is at -https://go.googlesource.com/go/+log/master/src/encoding/xml - -This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is -released. - -See http://golang.org/issue/11841 diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go deleted file mode 100644 index a7128431..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xml - -import "time" - -var atomValue = &Feed{ - XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, - Title: "Example Feed", - Link: []Link{{Href: "http://example.org/"}}, - Updated: ParseTime("2003-12-13T18:30:02Z"), - Author: Person{Name: "John Doe"}, - Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", - - Entry: []Entry{ - { - Title: "Atom-Powered Robots Run Amok", - Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, - Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", - Updated: ParseTime("2003-12-13T18:30:02Z"), - Summary: NewText("Some text."), - }, - }, -} - -var atomXml = `` + - `` + - `Example Feed` + - `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + - `` + - `John Doe` + - `` + - `Atom-Powered Robots Run Amok` + - `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + - `` + - `2003-12-13T18:30:02Z` + - `` + - `Some text.` + - `` + - `` - -func ParseTime(str string) time.Time { - t, err := time.Parse(time.RFC3339, str) - if err != nil { - panic(err) - } - return t -} - -func NewText(text string) Text { - return Text{ - Body: text, - } -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go deleted file mode 100644 index 21b48dea..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xml_test - -import ( - "encoding/xml" - "fmt" - "os" -) - -func ExampleMarshalIndent() { - type Address struct { - City, State string - } - type Person struct { - XMLName xml.Name `xml:"person"` - Id int `xml:"id,attr"` - FirstName string `xml:"name>first"` - LastName string `xml:"name>last"` - Age int `xml:"age"` - Height float32 `xml:"height,omitempty"` - Married bool - Address - Comment string `xml:",comment"` - } - - v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} - v.Comment = " Need more details. " - v.Address = Address{"Hanga Roa", "Easter Island"} - - output, err := xml.MarshalIndent(v, " ", " ") - if err != nil { - fmt.Printf("error: %v\n", err) - } - - os.Stdout.Write(output) - // Output: - // - // - // John - // Doe - // - // 42 - // false - // Hanga Roa - // Easter Island - // - // -} - -func ExampleEncoder() { - type Address struct { - City, State string - } - type Person struct { - XMLName xml.Name `xml:"person"` - Id int `xml:"id,attr"` - FirstName string `xml:"name>first"` - LastName string `xml:"name>last"` - Age int `xml:"age"` - Height float32 `xml:"height,omitempty"` - Married bool - Address - Comment string `xml:",comment"` - } - - v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} - v.Comment = " Need more details. " - v.Address = Address{"Hanga Roa", "Easter Island"} - - enc := xml.NewEncoder(os.Stdout) - enc.Indent(" ", " ") - if err := enc.Encode(v); err != nil { - fmt.Printf("error: %v\n", err) - } - - // Output: - // - // - // John - // Doe - // - // 42 - // false - // Hanga Roa - // Easter Island - // - // -} - -// This example demonstrates unmarshaling an XML excerpt into a value with -// some preset fields. Note that the Phone field isn't modified and that -// the XML element is ignored. Also, the Groups field is assigned -// considering the element path provided in its tag. 
-func ExampleUnmarshal() { - type Email struct { - Where string `xml:"where,attr"` - Addr string - } - type Address struct { - City, State string - } - type Result struct { - XMLName xml.Name `xml:"Person"` - Name string `xml:"FullName"` - Phone string - Email []Email - Groups []string `xml:"Group>Value"` - Address - } - v := Result{Name: "none", Phone: "none"} - - data := ` - - Grace R. Emlin - Example Inc. - - gre@example.com - - - gre@work.com - - - Friends - Squash - - Hanga Roa - Easter Island - - ` - err := xml.Unmarshal([]byte(data), &v) - if err != nil { - fmt.Printf("error: %v", err) - return - } - fmt.Printf("XMLName: %#v\n", v.XMLName) - fmt.Printf("Name: %q\n", v.Name) - fmt.Printf("Phone: %q\n", v.Phone) - fmt.Printf("Email: %v\n", v.Email) - fmt.Printf("Groups: %v\n", v.Groups) - fmt.Printf("Address: %v\n", v.Address) - // Output: - // XMLName: xml.Name{Space:"", Local:"Person"} - // Name: "Grace R. Emlin" - // Phone: "none" - // Email: [{home gre@example.com} {work gre@work.com}] - // Groups: [Friends Squash] - // Address: {Hanga Roa Easter Island} -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go deleted file mode 100644 index cb82ec21..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go +++ /dev/null @@ -1,1223 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xml - -import ( - "bufio" - "bytes" - "encoding" - "fmt" - "io" - "reflect" - "strconv" - "strings" -) - -const ( - // A generic XML header suitable for use with the output of Marshal. - // This is not automatically added to any output of this package, - // it is provided as a convenience. - Header = `` + "\n" -) - -// Marshal returns the XML encoding of v. -// -// Marshal handles an array or slice by marshalling each of the elements. -// Marshal handles a pointer by marshalling the value it points at or, if the -// pointer is nil, by writing nothing. Marshal handles an interface value by -// marshalling the value it contains or, if the interface value is nil, by -// writing nothing. Marshal handles all other data by writing one or more XML -// elements containing the data. -// -// The name for the XML elements is taken from, in order of preference: -// - the tag on the XMLName field, if the data is a struct -// - the value of the XMLName field of type xml.Name -// - the tag of the struct field used to obtain the data -// - the name of the struct field used to obtain the data -// - the name of the marshalled type -// -// The XML element for a struct contains marshalled elements for each of the -// exported fields of the struct, with these exceptions: -// - the XMLName field, described above, is omitted. -// - a field with tag "-" is omitted. -// - a field with tag "name,attr" becomes an attribute with -// the given name in the XML element. -// - a field with tag ",attr" becomes an attribute with the -// field name in the XML element. -// - a field with tag ",chardata" is written as character data, -// not as an XML element. -// - a field with tag ",innerxml" is written verbatim, not subject -// to the usual marshalling procedure. -// - a field with tag ",comment" is written as an XML comment, not -// subject to the usual marshalling procedure. It must not contain -// the "--" string within it. -// - a field with a tag including the "omitempty" option is omitted -// if the field value is empty. 
The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or -// string of length zero. -// - an anonymous struct field is handled as if the fields of its -// value were part of the outer struct. -// -// If a field uses a tag "a>b>c", then the element c will be nested inside -// parent elements a and b. Fields that appear next to each other that name -// the same parent will be enclosed in one XML element. -// -// See MarshalIndent for an example. -// -// Marshal will return an error if asked to marshal a channel, function, or map. -func Marshal(v interface{}) ([]byte, error) { - var b bytes.Buffer - if err := NewEncoder(&b).Encode(v); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// Marshaler is the interface implemented by objects that can marshal -// themselves into valid XML elements. -// -// MarshalXML encodes the receiver as zero or more XML elements. -// By convention, arrays or slices are typically encoded as a sequence -// of elements, one per entry. -// Using start as the element tag is not required, but doing so -// will enable Unmarshal to match the XML elements to the correct -// struct field. -// One common implementation strategy is to construct a separate -// value with a layout corresponding to the desired XML and then -// to encode it using e.EncodeElement. -// Another common strategy is to use repeated calls to e.EncodeToken -// to generate the XML output one token at a time. -// The sequence of encoded tokens must make up zero or more valid -// XML elements. -type Marshaler interface { - MarshalXML(e *Encoder, start StartElement) error -} - -// MarshalerAttr is the interface implemented by objects that can marshal -// themselves into valid XML attributes. -// -// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. -// Using name as the attribute name is not required, but doing so -// will enable Unmarshal to match the attribute to the correct -// struct field. -// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute -// will be generated in the output. -// MarshalXMLAttr is used only for struct fields with the -// "attr" option in the field tag. -type MarshalerAttr interface { - MarshalXMLAttr(name Name) (Attr, error) -} - -// MarshalIndent works like Marshal, but each XML element begins on a new -// indented line that starts with prefix and is followed by one or more -// copies of indent according to the nesting depth. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - var b bytes.Buffer - enc := NewEncoder(&b) - enc.Indent(prefix, indent) - if err := enc.Encode(v); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// An Encoder writes XML data to an output stream. -type Encoder struct { - p printer -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{printer{Writer: bufio.NewWriter(w)}} - e.p.encoder = e - return e -} - -// Indent sets the encoder to generate XML in which each element -// begins on a new indented line that starts with prefix and is followed by -// one or more copies of indent according to the nesting depth. -func (enc *Encoder) Indent(prefix, indent string) { - enc.p.prefix = prefix - enc.p.indent = indent -} - -// Encode writes the XML encoding of v to the stream. -// -// See the documentation for Marshal for details about the conversion -// of Go values to XML. -// -// Encode calls Flush before returning. 
-func (enc *Encoder) Encode(v interface{}) error { - err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) - if err != nil { - return err - } - return enc.p.Flush() -} - -// EncodeElement writes the XML encoding of v to the stream, -// using start as the outermost tag in the encoding. -// -// See the documentation for Marshal for details about the conversion -// of Go values to XML. -// -// EncodeElement calls Flush before returning. -func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { - err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) - if err != nil { - return err - } - return enc.p.Flush() -} - -var ( - begComment = []byte("") - endProcInst = []byte("?>") - endDirective = []byte(">") -) - -// EncodeToken writes the given XML token to the stream. -// It returns an error if StartElement and EndElement tokens are not -// properly matched. -// -// EncodeToken does not call Flush, because usually it is part of a -// larger operation such as Encode or EncodeElement (or a custom -// Marshaler's MarshalXML invoked during those), and those will call -// Flush when finished. Callers that create an Encoder and then invoke -// EncodeToken directly, without using Encode or EncodeElement, need to -// call Flush when finished to ensure that the XML is written to the -// underlying writer. -// -// EncodeToken allows writing a ProcInst with Target set to "xml" only -// as the first token in the stream. -// -// When encoding a StartElement holding an XML namespace prefix -// declaration for a prefix that is not already declared, contained -// elements (including the StartElement itself) will use the declared -// prefix when encoding names with matching namespace URIs. -func (enc *Encoder) EncodeToken(t Token) error { - - p := &enc.p - switch t := t.(type) { - case StartElement: - if err := p.writeStart(&t); err != nil { - return err - } - case EndElement: - if err := p.writeEnd(t.Name); err != nil { - return err - } - case CharData: - escapeText(p, t, false) - case Comment: - if bytes.Contains(t, endComment) { - return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") - } - p.WriteString("") - return p.cachedWriteError() - case ProcInst: - // First token to be encoded which is also a ProcInst with target of xml - // is the xml declaration. The only ProcInst where target of xml is allowed. - if t.Target == "xml" && p.Buffered() != 0 { - return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") - } - if !isNameString(t.Target) { - return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") - } - if bytes.Contains(t.Inst, endProcInst) { - return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") - } - p.WriteString(" 0 { - p.WriteByte(' ') - p.Write(t.Inst) - } - p.WriteString("?>") - case Directive: - if !isValidDirective(t) { - return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") - } - p.WriteString("") - default: - return fmt.Errorf("xml: EncodeToken of invalid token type") - - } - return p.cachedWriteError() -} - -// isValidDirective reports whether dir is a valid directive text, -// meaning angle brackets are matched, ignoring comments and strings. 
-func isValidDirective(dir Directive) bool { - var ( - depth int - inquote uint8 - incomment bool - ) - for i, c := range dir { - switch { - case incomment: - if c == '>' { - if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { - incomment = false - } - } - // Just ignore anything in comment - case inquote != 0: - if c == inquote { - inquote = 0 - } - // Just ignore anything within quotes - case c == '\'' || c == '"': - inquote = c - case c == '<': - if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { - incomment = true - } else { - depth++ - } - case c == '>': - if depth == 0 { - return false - } - depth-- - } - } - return depth == 0 && inquote == 0 && !incomment -} - -// Flush flushes any buffered XML to the underlying writer. -// See the EncodeToken documentation for details about when it is necessary. -func (enc *Encoder) Flush() error { - return enc.p.Flush() -} - -type printer struct { - *bufio.Writer - encoder *Encoder - seq int - indent string - prefix string - depth int - indentedIn bool - putNewline bool - defaultNS string - attrNS map[string]string // map prefix -> name space - attrPrefix map[string]string // map name space -> prefix - prefixes []printerPrefix - tags []Name -} - -// printerPrefix holds a namespace undo record. -// When an element is popped, the prefix record -// is set back to the recorded URL. The empty -// prefix records the URL for the default name space. -// -// The start of an element is recorded with an element -// that has mark=true. -type printerPrefix struct { - prefix string - url string - mark bool -} - -func (p *printer) prefixForNS(url string, isAttr bool) string { - // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" - // and must be referred to that way. - // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", - // but users should not be trying to use that one directly - that's our job.) - if url == xmlURL { - return "xml" - } - if !isAttr && url == p.defaultNS { - // We can use the default name space. - return "" - } - return p.attrPrefix[url] -} - -// defineNS pushes any namespace definition found in the given attribute. -// If ignoreNonEmptyDefault is true, an xmlns="nonempty" -// attribute will be ignored. -func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { - var prefix string - if attr.Name.Local == "xmlns" { - if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { - return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") - } - } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { - prefix = attr.Name.Local - if attr.Value == "" { - // Technically, an empty XML namespace is allowed for an attribute. - // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: - // - // The attribute value in a namespace declaration for a prefix may be - // empty. This has the effect, within the scope of the declaration, of removing - // any association of the prefix with a namespace name. - // - // However our namespace prefixes here are used only as hints. There's - // no need to respect the removal of a namespace prefix, so we ignore it. - return nil - } - } else { - // Ignore: it's not a namespace definition - return nil - } - if prefix == "" { - if attr.Value == p.defaultNS { - // No need for redefinition. - return nil - } - if attr.Value != "" && ignoreNonEmptyDefault { - // We have an xmlns="..." 
value but - // it can't define a name space in this context, - // probably because the element has an empty - // name space. In this case, we just ignore - // the name space declaration. - return nil - } - } else if _, ok := p.attrPrefix[attr.Value]; ok { - // There's already a prefix for the given name space, - // so use that. This prevents us from - // having two prefixes for the same name space - // so attrNS and attrPrefix can remain bijective. - return nil - } - p.pushPrefix(prefix, attr.Value) - return nil -} - -// createNSPrefix creates a name space prefix attribute -// to use for the given name space, defining a new prefix -// if necessary. -// If isAttr is true, the prefix is to be created for an attribute -// prefix, which means that the default name space cannot -// be used. -func (p *printer) createNSPrefix(url string, isAttr bool) { - if _, ok := p.attrPrefix[url]; ok { - // We already have a prefix for the given URL. - return - } - switch { - case !isAttr && url == p.defaultNS: - // We can use the default name space. - return - case url == "": - // The only way we can encode names in the empty - // name space is by using the default name space, - // so we must use that. - if p.defaultNS != "" { - // The default namespace is non-empty, so we - // need to set it to empty. - p.pushPrefix("", "") - } - return - case url == xmlURL: - return - } - // TODO If the URL is an existing prefix, we could - // use it as is. That would enable the - // marshaling of elements that had been unmarshaled - // and with a name space prefix that was not found. - // although technically it would be incorrect. - - // Pick a name. We try to use the final element of the path - // but fall back to _. - prefix := strings.TrimRight(url, "/") - if i := strings.LastIndex(prefix, "/"); i >= 0 { - prefix = prefix[i+1:] - } - if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { - prefix = "_" - } - if strings.HasPrefix(prefix, "xml") { - // xmlanything is reserved. - prefix = "_" + prefix - } - if p.attrNS[prefix] != "" { - // Name is taken. Find a better one. - for p.seq++; ; p.seq++ { - if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { - prefix = id - break - } - } - } - - p.pushPrefix(prefix, url) -} - -// writeNamespaces writes xmlns attributes for all the -// namespace prefixes that have been defined in -// the current element. -func (p *printer) writeNamespaces() { - for i := len(p.prefixes) - 1; i >= 0; i-- { - prefix := p.prefixes[i] - if prefix.mark { - return - } - p.WriteString(" ") - if prefix.prefix == "" { - // Default name space. - p.WriteString(`xmlns="`) - } else { - p.WriteString("xmlns:") - p.WriteString(prefix.prefix) - p.WriteString(`="`) - } - EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) - p.WriteString(`"`) - } -} - -// pushPrefix pushes a new prefix on the prefix stack -// without checking to see if it is already defined. -func (p *printer) pushPrefix(prefix, url string) { - p.prefixes = append(p.prefixes, printerPrefix{ - prefix: prefix, - url: p.nsForPrefix(prefix), - }) - p.setAttrPrefix(prefix, url) -} - -// nsForPrefix returns the name space for the given -// prefix. Note that this is not valid for the -// empty attribute prefix, which always has an empty -// name space. -func (p *printer) nsForPrefix(prefix string) string { - if prefix == "" { - return p.defaultNS - } - return p.attrNS[prefix] -} - -// markPrefix marks the start of an element on the prefix -// stack. 
-func (p *printer) markPrefix() { - p.prefixes = append(p.prefixes, printerPrefix{ - mark: true, - }) -} - -// popPrefix pops all defined prefixes for the current -// element. -func (p *printer) popPrefix() { - for len(p.prefixes) > 0 { - prefix := p.prefixes[len(p.prefixes)-1] - p.prefixes = p.prefixes[:len(p.prefixes)-1] - if prefix.mark { - break - } - p.setAttrPrefix(prefix.prefix, prefix.url) - } -} - -// setAttrPrefix sets an attribute name space prefix. -// If url is empty, the attribute is removed. -// If prefix is empty, the default name space is set. -func (p *printer) setAttrPrefix(prefix, url string) { - if prefix == "" { - p.defaultNS = url - return - } - if url == "" { - delete(p.attrPrefix, p.attrNS[prefix]) - delete(p.attrNS, prefix) - return - } - if p.attrPrefix == nil { - // Need to define a new name space. - p.attrPrefix = make(map[string]string) - p.attrNS = make(map[string]string) - } - // Remove any old prefix value. This is OK because we maintain a - // strict one-to-one mapping between prefix and URL (see - // defineNS) - delete(p.attrPrefix, p.attrNS[prefix]) - p.attrPrefix[url] = prefix - p.attrNS[prefix] = url -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() - textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() -) - -// marshalValue writes one or more XML elements representing val. -// If val was obtained from a struct field, finfo must have its details. -func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { - if startTemplate != nil && startTemplate.Name.Local == "" { - return fmt.Errorf("xml: EncodeElement of StartElement with missing name") - } - - if !val.IsValid() { - return nil - } - if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { - return nil - } - - // Drill into interfaces and pointers. - // This can turn into an infinite loop given a cyclic chain, - // but it matches the Go 1 behavior. - for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { - if val.IsNil() { - return nil - } - val = val.Elem() - } - - kind := val.Kind() - typ := val.Type() - - // Check for marshaler. - if val.CanInterface() && typ.Implements(marshalerType) { - return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) - } - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerType) { - return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) - } - } - - // Check for text marshaler. - if val.CanInterface() && typ.Implements(textMarshalerType) { - return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) - } - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) - } - } - - // Slices and arrays iterate over the elements. They do not have an enclosing tag. - if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { - for i, n := 0, val.Len(); i < n; i++ { - if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { - return err - } - } - return nil - } - - tinfo, err := getTypeInfo(typ) - if err != nil { - return err - } - - // Create start element. 
- // Precedence for the XML element name is: - // 0. startTemplate - // 1. XMLName field in underlying struct; - // 2. field name/tag in the struct field; and - // 3. type name - var start StartElement - - // explicitNS records whether the element's name space has been - // explicitly set (for example an XMLName field). - explicitNS := false - - if startTemplate != nil { - start.Name = startTemplate.Name - explicitNS = true - start.Attr = append(start.Attr, startTemplate.Attr...) - } else if tinfo.xmlname != nil { - xmlname := tinfo.xmlname - if xmlname.name != "" { - start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name - } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { - start.Name = v - } - explicitNS = true - } - if start.Name.Local == "" && finfo != nil { - start.Name.Local = finfo.name - if finfo.xmlns != "" { - start.Name.Space = finfo.xmlns - explicitNS = true - } - } - if start.Name.Local == "" { - name := typ.Name() - if name == "" { - return &UnsupportedTypeError{typ} - } - start.Name.Local = name - } - - // defaultNS records the default name space as set by a xmlns="..." - // attribute. We don't set p.defaultNS because we want to let - // the attribute writing code (in p.defineNS) be solely responsible - // for maintaining that. - defaultNS := p.defaultNS - - // Attributes - for i := range tinfo.fields { - finfo := &tinfo.fields[i] - if finfo.flags&fAttr == 0 { - continue - } - attr, err := p.fieldAttr(finfo, val) - if err != nil { - return err - } - if attr.Name.Local == "" { - continue - } - start.Attr = append(start.Attr, attr) - if attr.Name.Space == "" && attr.Name.Local == "xmlns" { - defaultNS = attr.Value - } - } - if !explicitNS { - // Historic behavior: elements use the default name space - // they are contained in by default. - start.Name.Space = defaultNS - } - // Historic behaviour: an element that's in a namespace sets - // the default namespace for all elements contained within it. - start.setDefaultNamespace() - - if err := p.writeStart(&start); err != nil { - return err - } - - if val.Kind() == reflect.Struct { - err = p.marshalStruct(tinfo, val) - } else { - s, b, err1 := p.marshalSimple(typ, val) - if err1 != nil { - err = err1 - } else if b != nil { - EscapeText(p, b) - } else { - p.EscapeString(s) - } - } - if err != nil { - return err - } - - if err := p.writeEnd(start.Name); err != nil { - return err - } - - return p.cachedWriteError() -} - -// fieldAttr returns the attribute of the given field. -// If the returned attribute has an empty Name.Local, -// it should not be used. -// The given value holds the value containing the field. 
-func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { - fv := finfo.value(val) - name := Name{Space: finfo.xmlns, Local: finfo.name} - if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { - return Attr{}, nil - } - if fv.Kind() == reflect.Interface && fv.IsNil() { - return Attr{}, nil - } - if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { - attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) - return attr, err - } - if fv.CanAddr() { - pv := fv.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { - attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) - return attr, err - } - } - if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { - text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return Attr{}, err - } - return Attr{name, string(text)}, nil - } - if fv.CanAddr() { - pv := fv.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return Attr{}, err - } - return Attr{name, string(text)}, nil - } - } - // Dereference or skip nil pointer, interface values. - switch fv.Kind() { - case reflect.Ptr, reflect.Interface: - if fv.IsNil() { - return Attr{}, nil - } - fv = fv.Elem() - } - s, b, err := p.marshalSimple(fv.Type(), fv) - if err != nil { - return Attr{}, err - } - if b != nil { - s = string(b) - } - return Attr{name, s}, nil -} - -// defaultStart returns the default start element to use, -// given the reflect type, field info, and start template. -func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { - var start StartElement - // Precedence for the XML element name is as above, - // except that we do not look inside structs for the first field. - if startTemplate != nil { - start.Name = startTemplate.Name - start.Attr = append(start.Attr, startTemplate.Attr...) - } else if finfo != nil && finfo.name != "" { - start.Name.Local = finfo.name - start.Name.Space = finfo.xmlns - } else if typ.Name() != "" { - start.Name.Local = typ.Name() - } else { - // Must be a pointer to a named type, - // since it has the Marshaler methods. - start.Name.Local = typ.Elem().Name() - } - // Historic behaviour: elements use the name space of - // the element they are contained in by default. - if start.Name.Space == "" { - start.Name.Space = p.defaultNS - } - start.setDefaultNamespace() - return start -} - -// marshalInterface marshals a Marshaler interface value. -func (p *printer) marshalInterface(val Marshaler, start StartElement) error { - // Push a marker onto the tag stack so that MarshalXML - // cannot close the XML tags that it did not open. - p.tags = append(p.tags, Name{}) - n := len(p.tags) - - err := val.MarshalXML(p.encoder, start) - if err != nil { - return err - } - - // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. - if len(p.tags) > n { - return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) - } - p.tags = p.tags[:n-1] - return nil -} - -// marshalTextInterface marshals a TextMarshaler interface value. 
-func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { - if err := p.writeStart(&start); err != nil { - return err - } - text, err := val.MarshalText() - if err != nil { - return err - } - EscapeText(p, text) - return p.writeEnd(start.Name) -} - -// writeStart writes the given start element. -func (p *printer) writeStart(start *StartElement) error { - if start.Name.Local == "" { - return fmt.Errorf("xml: start tag with no name") - } - - p.tags = append(p.tags, start.Name) - p.markPrefix() - // Define any name spaces explicitly declared in the attributes. - // We do this as a separate pass so that explicitly declared prefixes - // will take precedence over implicitly declared prefixes - // regardless of the order of the attributes. - ignoreNonEmptyDefault := start.Name.Space == "" - for _, attr := range start.Attr { - if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { - return err - } - } - // Define any new name spaces implied by the attributes. - for _, attr := range start.Attr { - name := attr.Name - // From http://www.w3.org/TR/xml-names11/#defaulting - // "Default namespace declarations do not apply directly - // to attribute names; the interpretation of unprefixed - // attributes is determined by the element on which they - // appear." - // This means we don't need to create a new namespace - // when an attribute name space is empty. - if name.Space != "" && !name.isNamespace() { - p.createNSPrefix(name.Space, true) - } - } - p.createNSPrefix(start.Name.Space, false) - - p.writeIndent(1) - p.WriteByte('<') - p.writeName(start.Name, false) - p.writeNamespaces() - for _, attr := range start.Attr { - name := attr.Name - if name.Local == "" || name.isNamespace() { - // Namespaces have already been written by writeNamespaces above. - continue - } - p.WriteByte(' ') - p.writeName(name, true) - p.WriteString(`="`) - p.EscapeString(attr.Value) - p.WriteByte('"') - } - p.WriteByte('>') - return nil -} - -// writeName writes the given name. It assumes -// that p.createNSPrefix(name) has already been called. 
-func (p *printer) writeName(name Name, isAttr bool) {
-	if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
-		p.WriteString(prefix)
-		p.WriteByte(':')
-	}
-	p.WriteString(name.Local)
-}
-
-func (p *printer) writeEnd(name Name) error {
-	if name.Local == "" {
-		return fmt.Errorf("xml: end tag with no name")
-	}
-	if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
-		return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
-	}
-	if top := p.tags[len(p.tags)-1]; top != name {
-		if top.Local != name.Local {
-			return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
-		}
-		return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
-	}
-	p.tags = p.tags[:len(p.tags)-1]
-
-	p.writeIndent(-1)
-	p.WriteByte('<')
-	p.WriteByte('/')
-	p.writeName(name, false)
-	p.WriteByte('>')
-	p.popPrefix()
-	return nil
-}
-
-func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
-	switch val.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return strconv.FormatInt(val.Int(), 10), nil, nil
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return strconv.FormatUint(val.Uint(), 10), nil, nil
-	case reflect.Float32, reflect.Float64:
-		return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
-	case reflect.String:
-		return val.String(), nil, nil
-	case reflect.Bool:
-		return strconv.FormatBool(val.Bool()), nil, nil
-	case reflect.Array:
-		if typ.Elem().Kind() != reflect.Uint8 {
-			break
-		}
-		// [...]byte
-		var bytes []byte
-		if val.CanAddr() {
-			bytes = val.Slice(0, val.Len()).Bytes()
-		} else {
-			bytes = make([]byte, val.Len())
-			reflect.Copy(reflect.ValueOf(bytes), val)
-		}
-		return "", bytes, nil
-	case reflect.Slice:
-		if typ.Elem().Kind() != reflect.Uint8 {
-			break
-		}
-		// []byte
-		return "", val.Bytes(), nil
-	}
-	return "", nil, &UnsupportedTypeError{typ}
-}
-
-var ddBytes = []byte("--")
-
-func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
-	s := parentStack{p: p}
-	for i := range tinfo.fields {
-		finfo := &tinfo.fields[i]
-		if finfo.flags&fAttr != 0 {
-			continue
-		}
-		vf := finfo.value(val)
-
-		// Dereference or skip nil pointer, interface values.
- switch vf.Kind() { - case reflect.Ptr, reflect.Interface: - if !vf.IsNil() { - vf = vf.Elem() - } - } - - switch finfo.flags & fMode { - case fCharData: - if err := s.setParents(&noField, reflect.Value{}); err != nil { - return err - } - if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { - data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - Escape(p, data) - continue - } - if vf.CanAddr() { - pv := vf.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - Escape(p, data) - continue - } - } - var scratch [64]byte - switch vf.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) - case reflect.Float32, reflect.Float64: - Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) - case reflect.Bool: - Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) - case reflect.String: - if err := EscapeText(p, []byte(vf.String())); err != nil { - return err - } - case reflect.Slice: - if elem, ok := vf.Interface().([]byte); ok { - if err := EscapeText(p, elem); err != nil { - return err - } - } - } - continue - - case fComment: - if err := s.setParents(&noField, reflect.Value{}); err != nil { - return err - } - k := vf.Kind() - if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { - return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) - } - if vf.Len() == 0 { - continue - } - p.writeIndent(0) - p.WriteString("" is invalid grammar. Make it "- -->" - p.WriteByte(' ') - } - p.WriteString("-->") - continue - - case fInnerXml: - iface := vf.Interface() - switch raw := iface.(type) { - case []byte: - p.Write(raw) - continue - case string: - p.WriteString(raw) - continue - } - - case fElement, fElement | fAny: - if err := s.setParents(finfo, vf); err != nil { - return err - } - } - if err := p.marshalValue(vf, finfo, nil); err != nil { - return err - } - } - if err := s.setParents(&noField, reflect.Value{}); err != nil { - return err - } - return p.cachedWriteError() -} - -var noField fieldInfo - -// return the bufio Writer's cached write error -func (p *printer) cachedWriteError() error { - _, err := p.Write(nil) - return err -} - -func (p *printer) writeIndent(depthDelta int) { - if len(p.prefix) == 0 && len(p.indent) == 0 { - return - } - if depthDelta < 0 { - p.depth-- - if p.indentedIn { - p.indentedIn = false - return - } - p.indentedIn = false - } - if p.putNewline { - p.WriteByte('\n') - } else { - p.putNewline = true - } - if len(p.prefix) > 0 { - p.WriteString(p.prefix) - } - if len(p.indent) > 0 { - for i := 0; i < p.depth; i++ { - p.WriteString(p.indent) - } - } - if depthDelta > 0 { - p.depth++ - p.indentedIn = true - } -} - -type parentStack struct { - p *printer - xmlns string - parents []string -} - -// setParents sets the stack of current parents to those found in finfo. -// It only writes the start elements if vf holds a non-nil value. -// If finfo is &noField, it pops all elements. 
-func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error { - xmlns := s.p.defaultNS - if finfo.xmlns != "" { - xmlns = finfo.xmlns - } - commonParents := 0 - if xmlns == s.xmlns { - for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ { - if finfo.parents[commonParents] != s.parents[commonParents] { - break - } - } - } - // Pop off any parents that aren't in common with the previous field. - for i := len(s.parents) - 1; i >= commonParents; i-- { - if err := s.p.writeEnd(Name{ - Space: s.xmlns, - Local: s.parents[i], - }); err != nil { - return err - } - } - s.parents = finfo.parents - s.xmlns = xmlns - if commonParents >= len(s.parents) { - // No new elements to push. - return nil - } - if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() { - // The element is nil, so no need for the start elements. - s.parents = s.parents[:commonParents] - return nil - } - // Push any new parents required. - for _, name := range s.parents[commonParents:] { - start := &StartElement{ - Name: Name{ - Space: s.xmlns, - Local: name, - }, - } - // Set the default name space for parent elements - // to match what we do with other elements. - if s.xmlns != s.p.defaultNS { - start.setDefaultNamespace() - } - if err := s.p.writeStart(start); err != nil { - return err - } - } - return nil -} - -// A MarshalXMLError is returned when Marshal encounters a type -// that cannot be converted into XML. -type UnsupportedTypeError struct { - Type reflect.Type -} - -func (e *UnsupportedTypeError) Error() string { - return "xml: unsupported type: " + e.Type.String() -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go deleted file mode 100644 index 226cfd01..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go +++ /dev/null @@ -1,1939 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
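As a rough illustration of how the prefix and default-namespace bookkeeping above surfaces to callers, the sketch below marshals a struct whose XMLName tag carries a name space. It is written against the upstream encoding/xml package that this vendored copy derives from; the Address type and the name space URL are invented for the example.

package main

import (
	"encoding/xml"
	"fmt"
)

// Address is a hypothetical type; the first token of the XMLName tag is
// treated as the element's name space.
type Address struct {
	XMLName xml.Name `xml:"http://example.com/ns address"`
	City    string   `xml:"city"`
}

func main() {
	out, err := xml.Marshal(Address{City: "Bonn"})
	if err != nil {
		panic(err)
	}
	// The element is emitted with an xmlns declaration for the tag's
	// name space, along the lines of:
	//   <address xmlns="http://example.com/ns"><city>Bonn</city></address>
	fmt.Println(string(out))
}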
- -package xml - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "sync" - "testing" - "time" -) - -type DriveType int - -const ( - HyperDrive DriveType = iota - ImprobabilityDrive -) - -type Passenger struct { - Name []string `xml:"name"` - Weight float32 `xml:"weight"` -} - -type Ship struct { - XMLName struct{} `xml:"spaceship"` - - Name string `xml:"name,attr"` - Pilot string `xml:"pilot,attr"` - Drive DriveType `xml:"drive"` - Age uint `xml:"age"` - Passenger []*Passenger `xml:"passenger"` - secret string -} - -type NamedType string - -type Port struct { - XMLName struct{} `xml:"port"` - Type string `xml:"type,attr,omitempty"` - Comment string `xml:",comment"` - Number string `xml:",chardata"` -} - -type Domain struct { - XMLName struct{} `xml:"domain"` - Country string `xml:",attr,omitempty"` - Name []byte `xml:",chardata"` - Comment []byte `xml:",comment"` -} - -type Book struct { - XMLName struct{} `xml:"book"` - Title string `xml:",chardata"` -} - -type Event struct { - XMLName struct{} `xml:"event"` - Year int `xml:",chardata"` -} - -type Movie struct { - XMLName struct{} `xml:"movie"` - Length uint `xml:",chardata"` -} - -type Pi struct { - XMLName struct{} `xml:"pi"` - Approximation float32 `xml:",chardata"` -} - -type Universe struct { - XMLName struct{} `xml:"universe"` - Visible float64 `xml:",chardata"` -} - -type Particle struct { - XMLName struct{} `xml:"particle"` - HasMass bool `xml:",chardata"` -} - -type Departure struct { - XMLName struct{} `xml:"departure"` - When time.Time `xml:",chardata"` -} - -type SecretAgent struct { - XMLName struct{} `xml:"agent"` - Handle string `xml:"handle,attr"` - Identity string - Obfuscate string `xml:",innerxml"` -} - -type NestedItems struct { - XMLName struct{} `xml:"result"` - Items []string `xml:">item"` - Item1 []string `xml:"Items>item1"` -} - -type NestedOrder struct { - XMLName struct{} `xml:"result"` - Field1 string `xml:"parent>c"` - Field2 string `xml:"parent>b"` - Field3 string `xml:"parent>a"` -} - -type MixedNested struct { - XMLName struct{} `xml:"result"` - A string `xml:"parent1>a"` - B string `xml:"b"` - C string `xml:"parent1>parent2>c"` - D string `xml:"parent1>d"` -} - -type NilTest struct { - A interface{} `xml:"parent1>parent2>a"` - B interface{} `xml:"parent1>b"` - C interface{} `xml:"parent1>parent2>c"` -} - -type Service struct { - XMLName struct{} `xml:"service"` - Domain *Domain `xml:"host>domain"` - Port *Port `xml:"host>port"` - Extra1 interface{} - Extra2 interface{} `xml:"host>extra2"` -} - -var nilStruct *Ship - -type EmbedA struct { - EmbedC - EmbedB EmbedB - FieldA string -} - -type EmbedB struct { - FieldB string - *EmbedC -} - -type EmbedC struct { - FieldA1 string `xml:"FieldA>A1"` - FieldA2 string `xml:"FieldA>A2"` - FieldB string - FieldC string -} - -type NameCasing struct { - XMLName struct{} `xml:"casing"` - Xy string - XY string - XyA string `xml:"Xy,attr"` - XYA string `xml:"XY,attr"` -} - -type NamePrecedence struct { - XMLName Name `xml:"Parent"` - FromTag XMLNameWithoutTag `xml:"InTag"` - FromNameVal XMLNameWithoutTag - FromNameTag XMLNameWithTag - InFieldName string -} - -type XMLNameWithTag struct { - XMLName Name `xml:"InXMLNameTag"` - Value string `xml:",chardata"` -} - -type XMLNameWithNSTag struct { - XMLName Name `xml:"ns InXMLNameWithNSTag"` - Value string `xml:",chardata"` -} - -type XMLNameWithoutTag struct { - XMLName Name - Value string `xml:",chardata"` -} - -type NameInField struct { - Foo Name `xml:"ns foo"` -} - -type AttrTest struct { - 
Int int `xml:",attr"` - Named int `xml:"int,attr"` - Float float64 `xml:",attr"` - Uint8 uint8 `xml:",attr"` - Bool bool `xml:",attr"` - Str string `xml:",attr"` - Bytes []byte `xml:",attr"` -} - -type OmitAttrTest struct { - Int int `xml:",attr,omitempty"` - Named int `xml:"int,attr,omitempty"` - Float float64 `xml:",attr,omitempty"` - Uint8 uint8 `xml:",attr,omitempty"` - Bool bool `xml:",attr,omitempty"` - Str string `xml:",attr,omitempty"` - Bytes []byte `xml:",attr,omitempty"` -} - -type OmitFieldTest struct { - Int int `xml:",omitempty"` - Named int `xml:"int,omitempty"` - Float float64 `xml:",omitempty"` - Uint8 uint8 `xml:",omitempty"` - Bool bool `xml:",omitempty"` - Str string `xml:",omitempty"` - Bytes []byte `xml:",omitempty"` - Ptr *PresenceTest `xml:",omitempty"` -} - -type AnyTest struct { - XMLName struct{} `xml:"a"` - Nested string `xml:"nested>value"` - AnyField AnyHolder `xml:",any"` -} - -type AnyOmitTest struct { - XMLName struct{} `xml:"a"` - Nested string `xml:"nested>value"` - AnyField *AnyHolder `xml:",any,omitempty"` -} - -type AnySliceTest struct { - XMLName struct{} `xml:"a"` - Nested string `xml:"nested>value"` - AnyField []AnyHolder `xml:",any"` -} - -type AnyHolder struct { - XMLName Name - XML string `xml:",innerxml"` -} - -type RecurseA struct { - A string - B *RecurseB -} - -type RecurseB struct { - A *RecurseA - B string -} - -type PresenceTest struct { - Exists *struct{} -} - -type IgnoreTest struct { - PublicSecret string `xml:"-"` -} - -type MyBytes []byte - -type Data struct { - Bytes []byte - Attr []byte `xml:",attr"` - Custom MyBytes -} - -type Plain struct { - V interface{} -} - -type MyInt int - -type EmbedInt struct { - MyInt -} - -type Strings struct { - X []string `xml:"A>B,omitempty"` -} - -type PointerFieldsTest struct { - XMLName Name `xml:"dummy"` - Name *string `xml:"name,attr"` - Age *uint `xml:"age,attr"` - Empty *string `xml:"empty,attr"` - Contents *string `xml:",chardata"` -} - -type ChardataEmptyTest struct { - XMLName Name `xml:"test"` - Contents *string `xml:",chardata"` -} - -type MyMarshalerTest struct { -} - -var _ Marshaler = (*MyMarshalerTest)(nil) - -func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { - e.EncodeToken(start) - e.EncodeToken(CharData([]byte("hello world"))) - e.EncodeToken(EndElement{start.Name}) - return nil -} - -type MyMarshalerAttrTest struct{} - -var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) - -func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { - return Attr{name, "hello world"}, nil -} - -type MyMarshalerValueAttrTest struct{} - -var _ MarshalerAttr = MyMarshalerValueAttrTest{} - -func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { - return Attr{name, "hello world"}, nil -} - -type MarshalerStruct struct { - Foo MyMarshalerAttrTest `xml:",attr"` -} - -type MarshalerValueStruct struct { - Foo MyMarshalerValueAttrTest `xml:",attr"` -} - -type InnerStruct struct { - XMLName Name `xml:"testns outer"` -} - -type OuterStruct struct { - InnerStruct - IntAttr int `xml:"int,attr"` -} - -type OuterNamedStruct struct { - InnerStruct - XMLName Name `xml:"outerns test"` - IntAttr int `xml:"int,attr"` -} - -type OuterNamedOrderedStruct struct { - XMLName Name `xml:"outerns test"` - InnerStruct - IntAttr int `xml:"int,attr"` -} - -type OuterOuterStruct struct { - OuterStruct -} - -type NestedAndChardata struct { - AB []string `xml:"A>B"` - Chardata string `xml:",chardata"` -} - -type NestedAndComment struct { - AB []string `xml:"A>B"` - Comment 
string `xml:",comment"` -} - -type XMLNSFieldStruct struct { - Ns string `xml:"xmlns,attr"` - Body string -} - -type NamedXMLNSFieldStruct struct { - XMLName struct{} `xml:"testns test"` - Ns string `xml:"xmlns,attr"` - Body string -} - -type XMLNSFieldStructWithOmitEmpty struct { - Ns string `xml:"xmlns,attr,omitempty"` - Body string -} - -type NamedXMLNSFieldStructWithEmptyNamespace struct { - XMLName struct{} `xml:"test"` - Ns string `xml:"xmlns,attr"` - Body string -} - -type RecursiveXMLNSFieldStruct struct { - Ns string `xml:"xmlns,attr"` - Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` - Text string `xml:",omitempty"` -} - -func ifaceptr(x interface{}) interface{} { - return &x -} - -var ( - nameAttr = "Sarah" - ageAttr = uint(12) - contentsAttr = "lorem ipsum" -) - -// Unless explicitly stated as such (or *Plain), all of the -// tests below are two-way tests. When introducing new tests, -// please try to make them two-way as well to ensure that -// marshalling and unmarshalling are as symmetrical as feasible. -var marshalTests = []struct { - Value interface{} - ExpectXML string - MarshalOnly bool - UnmarshalOnly bool -}{ - // Test nil marshals to nothing - {Value: nil, ExpectXML: ``, MarshalOnly: true}, - {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, - - // Test value types - {Value: &Plain{true}, ExpectXML: `true`}, - {Value: &Plain{false}, ExpectXML: `false`}, - {Value: &Plain{int(42)}, ExpectXML: `42`}, - {Value: &Plain{int8(42)}, ExpectXML: `42`}, - {Value: &Plain{int16(42)}, ExpectXML: `42`}, - {Value: &Plain{int32(42)}, ExpectXML: `42`}, - {Value: &Plain{uint(42)}, ExpectXML: `42`}, - {Value: &Plain{uint8(42)}, ExpectXML: `42`}, - {Value: &Plain{uint16(42)}, ExpectXML: `42`}, - {Value: &Plain{uint32(42)}, ExpectXML: `42`}, - {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, - {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, - {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, - {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, - {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, - {Value: &Plain{""}, ExpectXML: `</>`}, - {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, - {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, - {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, - {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, - {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, - {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, - - // Test time. - { - Value: &Plain{time.Unix(1e9, 123456789).UTC()}, - ExpectXML: `2001-09-09T01:46:40.123456789Z`, - }, - - // A pointer to struct{} may be used to test for an element's presence. - { - Value: &PresenceTest{new(struct{})}, - ExpectXML: ``, - }, - { - Value: &PresenceTest{}, - ExpectXML: ``, - }, - - // A pointer to struct{} may be used to test for an element's presence. - { - Value: &PresenceTest{new(struct{})}, - ExpectXML: ``, - }, - { - Value: &PresenceTest{}, - ExpectXML: ``, - }, - - // A []byte field is only nil if the element was not found. - { - Value: &Data{}, - ExpectXML: ``, - UnmarshalOnly: true, - }, - { - Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, - ExpectXML: ``, - UnmarshalOnly: true, - }, - - // Check that []byte works, including named []byte types. 
- { - Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, - ExpectXML: `abcd`, - }, - - // Test innerxml - { - Value: &SecretAgent{ - Handle: "007", - Identity: "James Bond", - Obfuscate: "", - }, - ExpectXML: `James Bond`, - MarshalOnly: true, - }, - { - Value: &SecretAgent{ - Handle: "007", - Identity: "James Bond", - Obfuscate: "James Bond", - }, - ExpectXML: `James Bond`, - UnmarshalOnly: true, - }, - - // Test structs - {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, - {Value: &Port{Number: "443"}, ExpectXML: `443`}, - {Value: &Port{Type: ""}, ExpectXML: ``}, - {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, - {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, - {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, - {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, - {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, - {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, - {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, - {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, - {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, - {Value: &Particle{HasMass: true}, ExpectXML: `true`}, - {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, - {Value: atomValue, ExpectXML: atomXml}, - { - Value: &Ship{ - Name: "Heart of Gold", - Pilot: "Computer", - Age: 1, - Drive: ImprobabilityDrive, - Passenger: []*Passenger{ - { - Name: []string{"Zaphod", "Beeblebrox"}, - Weight: 7.25, - }, - { - Name: []string{"Trisha", "McMillen"}, - Weight: 5.5, - }, - { - Name: []string{"Ford", "Prefect"}, - Weight: 7, - }, - { - Name: []string{"Arthur", "Dent"}, - Weight: 6.75, - }, - }, - }, - ExpectXML: `` + - `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + - `1` + - `` + - `Zaphod` + - `Beeblebrox` + - `7.25` + - `` + - `` + - `Trisha` + - `McMillen` + - `5.5` + - `` + - `` + - `Ford` + - `Prefect` + - `7` + - `` + - `` + - `Arthur` + - `Dent` + - `6.75` + - `` + - ``, - }, - - // Test a>b - { - Value: &NestedItems{Items: nil, Item1: nil}, - ExpectXML: `` + - `` + - `` + - ``, - }, - { - Value: &NestedItems{Items: []string{}, Item1: []string{}}, - ExpectXML: `` + - `` + - `` + - ``, - MarshalOnly: true, - }, - { - Value: &NestedItems{Items: nil, Item1: []string{"A"}}, - ExpectXML: `` + - `` + - `A` + - `` + - ``, - }, - { - Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, - ExpectXML: `` + - `` + - `A` + - `B` + - `` + - ``, - }, - { - Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, - ExpectXML: `` + - `` + - `A` + - `B` + - `C` + - `` + - ``, - }, - { - Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, - ExpectXML: `` + - `` + - `C` + - `B` + - `A` + - `` + - ``, - }, - { - Value: &NilTest{A: "A", B: nil, C: "C"}, - ExpectXML: `` + - `` + - `A` + - `C` + - `` + - ``, - MarshalOnly: true, // Uses interface{} - }, - { - Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, - ExpectXML: `` + - `A` + - `B` + - `` + - `C` + - `D` + - `` + - ``, - }, - { - Value: &Service{Port: &Port{Number: "80"}}, - ExpectXML: `80`, - }, - { - Value: &Service{}, - ExpectXML: ``, - }, - { - Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, - ExpectXML: `` + - `80` + - `A` + - `B` + - ``, - MarshalOnly: true, - }, - { - Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, - 
ExpectXML: `` + - `80` + - `example` + - ``, - MarshalOnly: true, - }, - { - Value: &struct { - XMLName struct{} `xml:"space top"` - A string `xml:"x>a"` - B string `xml:"x>b"` - C string `xml:"space x>c"` - C1 string `xml:"space1 x>c"` - D1 string `xml:"space1 x>d"` - E1 string `xml:"x>e"` - }{ - A: "a", - B: "b", - C: "c", - C1: "c1", - D1: "d1", - E1: "e1", - }, - ExpectXML: `` + - `abc` + - `` + - `c1` + - `d1` + - `` + - `` + - `e1` + - `` + - ``, - }, - { - Value: &struct { - XMLName Name - A string `xml:"x>a"` - B string `xml:"x>b"` - C string `xml:"space x>c"` - C1 string `xml:"space1 x>c"` - D1 string `xml:"space1 x>d"` - }{ - XMLName: Name{ - Space: "space0", - Local: "top", - }, - A: "a", - B: "b", - C: "c", - C1: "c1", - D1: "d1", - }, - ExpectXML: `` + - `ab` + - `c` + - `` + - `c1` + - `d1` + - `` + - ``, - }, - { - Value: &struct { - XMLName struct{} `xml:"top"` - B string `xml:"space x>b"` - B1 string `xml:"space1 x>b"` - }{ - B: "b", - B1: "b1", - }, - ExpectXML: `` + - `b` + - `b1` + - ``, - }, - - // Test struct embedding - { - Value: &EmbedA{ - EmbedC: EmbedC{ - FieldA1: "", // Shadowed by A.A - FieldA2: "", // Shadowed by A.A - FieldB: "A.C.B", - FieldC: "A.C.C", - }, - EmbedB: EmbedB{ - FieldB: "A.B.B", - EmbedC: &EmbedC{ - FieldA1: "A.B.C.A1", - FieldA2: "A.B.C.A2", - FieldB: "", // Shadowed by A.B.B - FieldC: "A.B.C.C", - }, - }, - FieldA: "A.A", - }, - ExpectXML: `` + - `A.C.B` + - `A.C.C` + - `` + - `A.B.B` + - `` + - `A.B.C.A1` + - `A.B.C.A2` + - `` + - `A.B.C.C` + - `` + - `A.A` + - ``, - }, - - // Test that name casing matters - { - Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, - ExpectXML: `mixedupper`, - }, - - // Test the order in which the XML element name is chosen - { - Value: &NamePrecedence{ - FromTag: XMLNameWithoutTag{Value: "A"}, - FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, - FromNameTag: XMLNameWithTag{Value: "C"}, - InFieldName: "D", - }, - ExpectXML: `` + - `A` + - `B` + - `C` + - `D` + - ``, - MarshalOnly: true, - }, - { - Value: &NamePrecedence{ - XMLName: Name{Local: "Parent"}, - FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, - FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, - FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, - InFieldName: "D", - }, - ExpectXML: `` + - `A` + - `B` + - `C` + - `D` + - ``, - UnmarshalOnly: true, - }, - - // xml.Name works in a plain field as well. - { - Value: &NameInField{Name{Space: "ns", Local: "foo"}}, - ExpectXML: ``, - }, - { - Value: &NameInField{Name{Space: "ns", Local: "foo"}}, - ExpectXML: ``, - UnmarshalOnly: true, - }, - - // Marshaling zero xml.Name uses the tag or field name. 
- { - Value: &NameInField{}, - ExpectXML: ``, - MarshalOnly: true, - }, - - // Test attributes - { - Value: &AttrTest{ - Int: 8, - Named: 9, - Float: 23.5, - Uint8: 255, - Bool: true, - Str: "str", - Bytes: []byte("byt"), - }, - ExpectXML: ``, - }, - { - Value: &AttrTest{Bytes: []byte{}}, - ExpectXML: ``, - }, - { - Value: &OmitAttrTest{ - Int: 8, - Named: 9, - Float: 23.5, - Uint8: 255, - Bool: true, - Str: "str", - Bytes: []byte("byt"), - }, - ExpectXML: ``, - }, - { - Value: &OmitAttrTest{}, - ExpectXML: ``, - }, - - // pointer fields - { - Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, - ExpectXML: `lorem ipsum`, - MarshalOnly: true, - }, - - // empty chardata pointer field - { - Value: &ChardataEmptyTest{}, - ExpectXML: ``, - MarshalOnly: true, - }, - - // omitempty on fields - { - Value: &OmitFieldTest{ - Int: 8, - Named: 9, - Float: 23.5, - Uint8: 255, - Bool: true, - Str: "str", - Bytes: []byte("byt"), - Ptr: &PresenceTest{}, - }, - ExpectXML: `` + - `8` + - `9` + - `23.5` + - `255` + - `true` + - `str` + - `byt` + - `` + - ``, - }, - { - Value: &OmitFieldTest{}, - ExpectXML: ``, - }, - - // Test ",any" - { - ExpectXML: `knownunknown`, - Value: &AnyTest{ - Nested: "known", - AnyField: AnyHolder{ - XMLName: Name{Local: "other"}, - XML: "unknown", - }, - }, - }, - { - Value: &AnyTest{Nested: "known", - AnyField: AnyHolder{ - XML: "", - XMLName: Name{Local: "AnyField"}, - }, - }, - ExpectXML: `known`, - }, - { - ExpectXML: `b`, - Value: &AnyOmitTest{ - Nested: "b", - }, - }, - { - ExpectXML: `bei`, - Value: &AnySliceTest{ - Nested: "b", - AnyField: []AnyHolder{ - { - XMLName: Name{Local: "c"}, - XML: "e", - }, - { - XMLName: Name{Space: "f", Local: "g"}, - XML: "i", - }, - }, - }, - }, - { - ExpectXML: `b`, - Value: &AnySliceTest{ - Nested: "b", - }, - }, - - // Test recursive types. - { - Value: &RecurseA{ - A: "a1", - B: &RecurseB{ - A: &RecurseA{"a2", nil}, - B: "b1", - }, - }, - ExpectXML: `a1a2b1`, - }, - - // Test ignoring fields via "-" tag - { - ExpectXML: ``, - Value: &IgnoreTest{}, - }, - { - ExpectXML: ``, - Value: &IgnoreTest{PublicSecret: "can't tell"}, - MarshalOnly: true, - }, - { - ExpectXML: `ignore me`, - Value: &IgnoreTest{}, - UnmarshalOnly: true, - }, - - // Test escaping. - { - ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, - Value: &AnyTest{ - Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, - AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, - }, - }, - { - ExpectXML: `newline: ; cr: ; tab: ;`, - Value: &AnyTest{ - Nested: "newline: \n; cr: \r; tab: \t;", - AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, - }, - }, - { - ExpectXML: "1\r2\r\n3\n\r4\n5", - Value: &AnyTest{ - Nested: "1\n2\n3\n\n4\n5", - }, - UnmarshalOnly: true, - }, - { - ExpectXML: `42`, - Value: &EmbedInt{ - MyInt: 42, - }, - }, - // Test omitempty with parent chain; see golang.org/issue/4168. - { - ExpectXML: ``, - Value: &Strings{}, - }, - // Custom marshalers. 
- { - ExpectXML: `hello world`, - Value: &MyMarshalerTest{}, - }, - { - ExpectXML: ``, - Value: &MarshalerStruct{}, - }, - { - ExpectXML: ``, - Value: &MarshalerValueStruct{}, - }, - { - ExpectXML: ``, - Value: &OuterStruct{IntAttr: 10}, - }, - { - ExpectXML: ``, - Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, - }, - { - ExpectXML: ``, - Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, - }, - { - ExpectXML: ``, - Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, - }, - { - ExpectXML: `test`, - Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, - }, - { - ExpectXML: ``, - Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, - }, - { - ExpectXML: `hello world`, - Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, - }, - { - ExpectXML: `hello world`, - Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, - }, - { - ExpectXML: `hello world`, - Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, - }, - { - ExpectXML: `hello world`, - Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, - }, - { - // The xmlns attribute must be ignored because the - // element is in the empty namespace, so it's not possible - // to set the default namespace to something non-empty. - ExpectXML: `hello world`, - Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, - MarshalOnly: true, - }, - { - ExpectXML: `hello world`, - Value: &RecursiveXMLNSFieldStruct{ - Ns: "foo", - Body: &RecursiveXMLNSFieldStruct{ - Text: "hello world", - }, - }, - }, -} - -func TestMarshal(t *testing.T) { - for idx, test := range marshalTests { - if test.UnmarshalOnly { - continue - } - data, err := Marshal(test.Value) - if err != nil { - t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) - continue - } - if got, want := string(data), test.ExpectXML; got != want { - if strings.Contains(want, "\n") { - t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) - } else { - t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) - } - } - } -} - -type AttrParent struct { - X string `xml:"X>Y,attr"` -} - -type BadAttr struct { - Name []string `xml:"name,attr"` -} - -var marshalErrorTests = []struct { - Value interface{} - Err string - Kind reflect.Kind -}{ - { - Value: make(chan bool), - Err: "xml: unsupported type: chan bool", - Kind: reflect.Chan, - }, - { - Value: map[string]string{ - "question": "What do you get when you multiply six by nine?", - "answer": "42", - }, - Err: "xml: unsupported type: map[string]string", - Kind: reflect.Map, - }, - { - Value: map[*Ship]bool{nil: false}, - Err: "xml: unsupported type: map[*xml.Ship]bool", - Kind: reflect.Map, - }, - { - Value: &Domain{Comment: []byte("f--bar")}, - Err: `xml: comments must not contain "--"`, - }, - // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
- { - Value: &AttrParent{}, - Err: `xml: X>Y chain not valid with attr flag`, - }, - { - Value: BadAttr{[]string{"X", "Y"}}, - Err: `xml: unsupported type: []string`, - }, -} - -var marshalIndentTests = []struct { - Value interface{} - Prefix string - Indent string - ExpectXML string -}{ - { - Value: &SecretAgent{ - Handle: "007", - Identity: "James Bond", - Obfuscate: "", - }, - Prefix: "", - Indent: "\t", - ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), - }, -} - -func TestMarshalErrors(t *testing.T) { - for idx, test := range marshalErrorTests { - data, err := Marshal(test.Value) - if err == nil { - t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) - continue - } - if err.Error() != test.Err { - t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) - } - if test.Kind != reflect.Invalid { - if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { - t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) - } - } - } -} - -// Do invertibility testing on the various structures that we test -func TestUnmarshal(t *testing.T) { - for i, test := range marshalTests { - if test.MarshalOnly { - continue - } - if _, ok := test.Value.(*Plain); ok { - continue - } - vt := reflect.TypeOf(test.Value) - dest := reflect.New(vt.Elem()).Interface() - err := Unmarshal([]byte(test.ExpectXML), dest) - - switch fix := dest.(type) { - case *Feed: - fix.Author.InnerXML = "" - for i := range fix.Entry { - fix.Entry[i].Author.InnerXML = "" - } - } - - if err != nil { - t.Errorf("#%d: unexpected error: %#v", i, err) - } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { - t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) - } - } -} - -func TestMarshalIndent(t *testing.T) { - for i, test := range marshalIndentTests { - data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) - if err != nil { - t.Errorf("#%d: Error: %s", i, err) - continue - } - if got, want := string(data), test.ExpectXML; got != want { - t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) - } - } -} - -type limitedBytesWriter struct { - w io.Writer - remain int // until writes fail -} - -func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { - if lw.remain <= 0 { - println("error") - return 0, errors.New("write limit hit") - } - if len(p) > lw.remain { - p = p[:lw.remain] - n, _ = lw.w.Write(p) - lw.remain = 0 - return n, errors.New("write limit hit") - } - n, err = lw.w.Write(p) - lw.remain -= n - return n, err -} - -func TestMarshalWriteErrors(t *testing.T) { - var buf bytes.Buffer - const writeCap = 1024 - w := &limitedBytesWriter{&buf, writeCap} - enc := NewEncoder(w) - var err error - var i int - const n = 4000 - for i = 1; i <= n; i++ { - err = enc.Encode(&Passenger{ - Name: []string{"Alice", "Bob"}, - Weight: 5, - }) - if err != nil { - break - } - } - if err == nil { - t.Error("expected an error") - } - if i == n { - t.Errorf("expected to fail before the end") - } - if buf.Len() != writeCap { - t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) - } -} - -func TestMarshalWriteIOErrors(t *testing.T) { - enc := NewEncoder(errWriter{}) - - expectErr := "unwritable" - err := enc.Encode(&Passenger{}) - if err == nil || err.Error() != expectErr { - t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) - } -} - -func TestMarshalFlush(t *testing.T) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - if err := 
enc.EncodeToken(CharData("hello world")); err != nil { - t.Fatalf("enc.EncodeToken: %v", err) - } - if buf.Len() > 0 { - t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) - } - if err := enc.Flush(); err != nil { - t.Fatalf("enc.Flush: %v", err) - } - if buf.String() != "hello world" { - t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") - } -} - -var encodeElementTests = []struct { - desc string - value interface{} - start StartElement - expectXML string -}{{ - desc: "simple string", - value: "hello", - start: StartElement{ - Name: Name{Local: "a"}, - }, - expectXML: `hello`, -}, { - desc: "string with added attributes", - value: "hello", - start: StartElement{ - Name: Name{Local: "a"}, - Attr: []Attr{{ - Name: Name{Local: "x"}, - Value: "y", - }, { - Name: Name{Local: "foo"}, - Value: "bar", - }}, - }, - expectXML: `hello`, -}, { - desc: "start element with default name space", - value: struct { - Foo XMLNameWithNSTag - }{ - Foo: XMLNameWithNSTag{ - Value: "hello", - }, - }, - start: StartElement{ - Name: Name{Space: "ns", Local: "a"}, - Attr: []Attr{{ - Name: Name{Local: "xmlns"}, - // "ns" is the name space defined in XMLNameWithNSTag - Value: "ns", - }}, - }, - expectXML: `hello`, -}, { - desc: "start element in name space with different default name space", - value: struct { - Foo XMLNameWithNSTag - }{ - Foo: XMLNameWithNSTag{ - Value: "hello", - }, - }, - start: StartElement{ - Name: Name{Space: "ns2", Local: "a"}, - Attr: []Attr{{ - Name: Name{Local: "xmlns"}, - // "ns" is the name space defined in XMLNameWithNSTag - Value: "ns", - }}, - }, - expectXML: `hello`, -}, { - desc: "XMLMarshaler with start element with default name space", - value: &MyMarshalerTest{}, - start: StartElement{ - Name: Name{Space: "ns2", Local: "a"}, - Attr: []Attr{{ - Name: Name{Local: "xmlns"}, - // "ns" is the name space defined in XMLNameWithNSTag - Value: "ns", - }}, - }, - expectXML: `hello world`, -}} - -func TestEncodeElement(t *testing.T) { - for idx, test := range encodeElementTests { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := enc.EncodeElement(test.value, test.start) - if err != nil { - t.Fatalf("enc.EncodeElement: %v", err) - } - err = enc.Flush() - if err != nil { - t.Fatalf("enc.Flush: %v", err) - } - if got, want := buf.String(), test.expectXML; got != want { - t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) - } - } -} - -func BenchmarkMarshal(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - Marshal(atomValue) - } -} - -func BenchmarkUnmarshal(b *testing.B) { - b.ReportAllocs() - xml := []byte(atomXml) - for i := 0; i < b.N; i++ { - Unmarshal(xml, &Feed{}) - } -} - -// golang.org/issue/6556 -func TestStructPointerMarshal(t *testing.T) { - type A struct { - XMLName string `xml:"a"` - B []interface{} - } - type C struct { - XMLName Name - Value string `xml:"value"` - } - - a := new(A) - a.B = append(a.B, &C{ - XMLName: Name{Local: "c"}, - Value: "x", - }) - - b, err := Marshal(a) - if err != nil { - t.Fatal(err) - } - if x := string(b); x != "x" { - t.Fatal(x) - } - var v A - err = Unmarshal(b, &v) - if err != nil { - t.Fatal(err) - } -} - -var encodeTokenTests = []struct { - desc string - toks []Token - want string - err string -}{{ - desc: "start element with name space", - toks: []Token{ - StartElement{Name{"space", "local"}, nil}, - }, - want: ``, -}, { - desc: "start element with no name", - toks: []Token{ - StartElement{Name{"space", ""}, 
nil}, - }, - err: "xml: start tag with no name", -}, { - desc: "end element with no name", - toks: []Token{ - EndElement{Name{"space", ""}}, - }, - err: "xml: end tag with no name", -}, { - desc: "char data", - toks: []Token{ - CharData("foo"), - }, - want: `foo`, -}, { - desc: "char data with escaped chars", - toks: []Token{ - CharData(" \t\n"), - }, - want: " \n", -}, { - desc: "comment", - toks: []Token{ - Comment("foo"), - }, - want: ``, -}, { - desc: "comment with invalid content", - toks: []Token{ - Comment("foo-->"), - }, - err: "xml: EncodeToken of Comment containing --> marker", -}, { - desc: "proc instruction", - toks: []Token{ - ProcInst{"Target", []byte("Instruction")}, - }, - want: ``, -}, { - desc: "proc instruction with empty target", - toks: []Token{ - ProcInst{"", []byte("Instruction")}, - }, - err: "xml: EncodeToken of ProcInst with invalid Target", -}, { - desc: "proc instruction with bad content", - toks: []Token{ - ProcInst{"", []byte("Instruction?>")}, - }, - err: "xml: EncodeToken of ProcInst with invalid Target", -}, { - desc: "directive", - toks: []Token{ - Directive("foo"), - }, - want: ``, -}, { - desc: "more complex directive", - toks: []Token{ - Directive("DOCTYPE doc [ '> ]"), - }, - want: `'> ]>`, -}, { - desc: "directive instruction with bad name", - toks: []Token{ - Directive("foo>"), - }, - err: "xml: EncodeToken of Directive containing wrong < or > markers", -}, { - desc: "end tag without start tag", - toks: []Token{ - EndElement{Name{"foo", "bar"}}, - }, - err: "xml: end tag without start tag", -}, { - desc: "mismatching end tag local name", - toks: []Token{ - StartElement{Name{"", "foo"}, nil}, - EndElement{Name{"", "bar"}}, - }, - err: "xml: end tag does not match start tag ", - want: ``, -}, { - desc: "mismatching end tag namespace", - toks: []Token{ - StartElement{Name{"space", "foo"}, nil}, - EndElement{Name{"another", "foo"}}, - }, - err: "xml: end tag in namespace another does not match start tag in namespace space", - want: ``, -}, { - desc: "start element with explicit namespace", - toks: []Token{ - StartElement{Name{"space", "local"}, []Attr{ - {Name{"xmlns", "x"}, "space"}, - {Name{"space", "foo"}, "value"}, - }}, - }, - want: ``, -}, { - desc: "start element with explicit namespace and colliding prefix", - toks: []Token{ - StartElement{Name{"space", "local"}, []Attr{ - {Name{"xmlns", "x"}, "space"}, - {Name{"space", "foo"}, "value"}, - {Name{"x", "bar"}, "other"}, - }}, - }, - want: ``, -}, { - desc: "start element using previously defined namespace", - toks: []Token{ - StartElement{Name{"", "local"}, []Attr{ - {Name{"xmlns", "x"}, "space"}, - }}, - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"space", "x"}, "y"}, - }}, - }, - want: ``, -}, { - desc: "nested name space with same prefix", - toks: []Token{ - StartElement{Name{"", "foo"}, []Attr{ - {Name{"xmlns", "x"}, "space1"}, - }}, - StartElement{Name{"", "foo"}, []Attr{ - {Name{"xmlns", "x"}, "space2"}, - }}, - StartElement{Name{"", "foo"}, []Attr{ - {Name{"space1", "a"}, "space1 value"}, - {Name{"space2", "b"}, "space2 value"}, - }}, - EndElement{Name{"", "foo"}}, - EndElement{Name{"", "foo"}}, - StartElement{Name{"", "foo"}, []Attr{ - {Name{"space1", "a"}, "space1 value"}, - {Name{"space2", "b"}, "space2 value"}, - }}, - }, - want: ``, -}, { - desc: "start element defining several prefixes for the same name space", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"xmlns", "a"}, "space"}, - {Name{"xmlns", "b"}, "space"}, - {Name{"space", "x"}, "value"}, - }}, - 
}, - want: ``, -}, { - desc: "nested element redefines name space", - toks: []Token{ - StartElement{Name{"", "foo"}, []Attr{ - {Name{"xmlns", "x"}, "space"}, - }}, - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"xmlns", "y"}, "space"}, - {Name{"space", "a"}, "value"}, - }}, - }, - want: ``, -}, { - desc: "nested element creates alias for default name space", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "xmlns"}, "space"}, - }}, - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"xmlns", "y"}, "space"}, - {Name{"space", "a"}, "value"}, - }}, - }, - want: ``, -}, { - desc: "nested element defines default name space with existing prefix", - toks: []Token{ - StartElement{Name{"", "foo"}, []Attr{ - {Name{"xmlns", "x"}, "space"}, - }}, - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "xmlns"}, "space"}, - {Name{"space", "a"}, "value"}, - }}, - }, - want: ``, -}, { - desc: "nested element uses empty attribute name space when default ns defined", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "xmlns"}, "space"}, - }}, - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "attr"}, "value"}, - }}, - }, - want: ``, -}, { - desc: "redefine xmlns", - toks: []Token{ - StartElement{Name{"", "foo"}, []Attr{ - {Name{"foo", "xmlns"}, "space"}, - }}, - }, - err: `xml: cannot redefine xmlns attribute prefix`, -}, { - desc: "xmlns with explicit name space #1", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"xml", "xmlns"}, "space"}, - }}, - }, - want: ``, -}, { - desc: "xmlns with explicit name space #2", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{xmlURL, "xmlns"}, "space"}, - }}, - }, - want: ``, -}, { - desc: "empty name space declaration is ignored", - toks: []Token{ - StartElement{Name{"", "foo"}, []Attr{ - {Name{"xmlns", "foo"}, ""}, - }}, - }, - want: ``, -}, { - desc: "attribute with no name is ignored", - toks: []Token{ - StartElement{Name{"", "foo"}, []Attr{ - {Name{"", ""}, "value"}, - }}, - }, - want: ``, -}, { - desc: "namespace URL with non-valid name", - toks: []Token{ - StartElement{Name{"/34", "foo"}, []Attr{ - {Name{"/34", "x"}, "value"}, - }}, - }, - want: `<_:foo xmlns:_="/34" _:x="value">`, -}, { - desc: "nested element resets default namespace to empty", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "xmlns"}, "space"}, - }}, - StartElement{Name{"", "foo"}, []Attr{ - {Name{"", "xmlns"}, ""}, - {Name{"", "x"}, "value"}, - {Name{"space", "x"}, "value"}, - }}, - }, - want: ``, -}, { - desc: "nested element requires empty default name space", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "xmlns"}, "space"}, - }}, - StartElement{Name{"", "foo"}, nil}, - }, - want: ``, -}, { - desc: "attribute uses name space from xmlns", - toks: []Token{ - StartElement{Name{"some/space", "foo"}, []Attr{ - {Name{"", "attr"}, "value"}, - {Name{"some/space", "other"}, "other value"}, - }}, - }, - want: ``, -}, { - desc: "default name space should not be used by attributes", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "xmlns"}, "space"}, - {Name{"xmlns", "bar"}, "space"}, - {Name{"space", "baz"}, "foo"}, - }}, - StartElement{Name{"space", "baz"}, nil}, - EndElement{Name{"space", "baz"}}, - EndElement{Name{"space", "foo"}}, - }, - want: ``, -}, { - desc: "default name space not used by attributes, not explicitly defined", - toks: []Token{ - StartElement{Name{"space", "foo"}, []Attr{ - {Name{"", "xmlns"}, 
"space"}, - {Name{"space", "baz"}, "foo"}, - }}, - StartElement{Name{"space", "baz"}, nil}, - EndElement{Name{"space", "baz"}}, - EndElement{Name{"space", "foo"}}, - }, - want: ``, -}, { - desc: "impossible xmlns declaration", - toks: []Token{ - StartElement{Name{"", "foo"}, []Attr{ - {Name{"", "xmlns"}, "space"}, - }}, - StartElement{Name{"space", "bar"}, []Attr{ - {Name{"space", "attr"}, "value"}, - }}, - }, - want: ``, -}} - -func TestEncodeToken(t *testing.T) { -loop: - for i, tt := range encodeTokenTests { - var buf bytes.Buffer - enc := NewEncoder(&buf) - var err error - for j, tok := range tt.toks { - err = enc.EncodeToken(tok) - if err != nil && j < len(tt.toks)-1 { - t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) - continue loop - } - } - errorf := func(f string, a ...interface{}) { - t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) - } - switch { - case tt.err != "" && err == nil: - errorf(" expected error; got none") - continue - case tt.err == "" && err != nil: - errorf(" got error: %v", err) - continue - case tt.err != "" && err != nil && tt.err != err.Error(): - errorf(" error mismatch; got %v, want %v", err, tt.err) - continue - } - if err := enc.Flush(); err != nil { - errorf(" %v", err) - continue - } - if got := buf.String(); got != tt.want { - errorf("\ngot %v\nwant %v", got, tt.want) - continue - } - } -} - -func TestProcInstEncodeToken(t *testing.T) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - - if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { - t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) - } - - if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { - t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") - } - - if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { - t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") - } -} - -func TestDecodeEncode(t *testing.T) { - var in, out bytes.Buffer - in.WriteString(` - - - -`) - dec := NewDecoder(&in) - enc := NewEncoder(&out) - for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { - err = enc.EncodeToken(tok) - if err != nil { - t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) - } - } -} - -// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. -func TestRace9796(t *testing.T) { - type A struct{} - type B struct { - C []A `xml:"X>Y"` - } - var wg sync.WaitGroup - for i := 0; i < 2; i++ { - wg.Add(1) - go func() { - Marshal(B{[]A{{}}}) - wg.Done() - }() - } - wg.Wait() -} - -func TestIsValidDirective(t *testing.T) { - testOK := []string{ - "<>", - "< < > >", - "' '>' >", - " ]>", - " '<' ' doc ANY> ]>", - ">>> a < comment --> [ ] >", - } - testKO := []string{ - "<", - ">", - "", - "< > > < < >", - " -->", - "", - "'", - "", - } - for _, s := range testOK { - if !isValidDirective(Directive(s)) { - t.Errorf("Directive %q is expected to be valid", s) - } - } - for _, s := range testKO { - if isValidDirective(Directive(s)) { - t.Errorf("Directive %q is expected to be invalid", s) - } - } -} - -// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
-func TestSimpleUseOfEncodeToken(t *testing.T) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { - t.Errorf("enc.EncodeToken: pointer type should be rejected") - } - if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { - t.Errorf("enc.EncodeToken: pointer type should be rejected") - } - if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { - t.Errorf("enc.EncodeToken: StartElement %s", err) - } - if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { - t.Errorf("enc.EncodeToken: EndElement %s", err) - } - if err := enc.EncodeToken(Universe{}); err == nil { - t.Errorf("enc.EncodeToken: invalid type not caught") - } - if err := enc.Flush(); err != nil { - t.Errorf("enc.Flush: %s", err) - } - if buf.Len() == 0 { - t.Errorf("enc.EncodeToken: empty buffer") - } - want := "" - if buf.String() != want { - t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) - } -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go deleted file mode 100644 index 4089056a..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/read.go +++ /dev/null @@ -1,692 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" -) - -// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: -// an XML element is an order-dependent collection of anonymous -// values, while a data structure is an order-independent collection -// of named values. -// See package json for a textual representation more suitable -// to data structures. - -// Unmarshal parses the XML-encoded data and stores the result in -// the value pointed to by v, which must be an arbitrary struct, -// slice, or string. Well-formed data that does not fit into v is -// discarded. -// -// Because Unmarshal uses the reflect package, it can only assign -// to exported (upper case) fields. Unmarshal uses a case-sensitive -// comparison to match XML element names to tag values and struct -// field names. -// -// Unmarshal maps an XML element to a struct using the following rules. -// In the rules, the tag of a field refers to the value associated with the -// key 'xml' in the struct field's tag (see the example above). -// -// * If the struct has a field of type []byte or string with tag -// ",innerxml", Unmarshal accumulates the raw XML nested inside the -// element in that field. The rest of the rules still apply. -// -// * If the struct has a field named XMLName of type xml.Name, -// Unmarshal records the element name in that field. -// -// * If the XMLName field has an associated tag of the form -// "name" or "namespace-URL name", the XML element must have -// the given name (and, optionally, name space) or else Unmarshal -// returns an error. -// -// * If the XML element has an attribute whose name matches a -// struct field name with an associated tag containing ",attr" or -// the explicit name in a struct field tag of the form "name,attr", -// Unmarshal records the attribute value in that field. -// -// * If the XML element contains character data, that data is -// accumulated in the first struct field that has tag ",chardata". 
-// The struct field may have type []byte or string. -// If there is no such field, the character data is discarded. -// -// * If the XML element contains comments, they are accumulated in -// the first struct field that has tag ",comment". The struct -// field may have type []byte or string. If there is no such -// field, the comments are discarded. -// -// * If the XML element contains a sub-element whose name matches -// the prefix of a tag formatted as "a" or "a>b>c", unmarshal -// will descend into the XML structure looking for elements with the -// given names, and will map the innermost elements to that struct -// field. A tag starting with ">" is equivalent to one starting -// with the field name followed by ">". -// -// * If the XML element contains a sub-element whose name matches -// a struct field's XMLName tag and the struct field has no -// explicit name tag as per the previous rule, unmarshal maps -// the sub-element to that struct field. -// -// * If the XML element contains a sub-element whose name matches a -// field without any mode flags (",attr", ",chardata", etc), Unmarshal -// maps the sub-element to that struct field. -// -// * If the XML element contains a sub-element that hasn't matched any -// of the above rules and the struct has a field with tag ",any", -// unmarshal maps the sub-element to that struct field. -// -// * An anonymous struct field is handled as if the fields of its -// value were part of the outer struct. -// -// * A struct field with tag "-" is never unmarshalled into. -// -// Unmarshal maps an XML element to a string or []byte by saving the -// concatenation of that element's character data in the string or -// []byte. The saved []byte is never nil. -// -// Unmarshal maps an attribute value to a string or []byte by saving -// the value in the string or slice. -// -// Unmarshal maps an XML element to a slice by extending the length of -// the slice and mapping the element to the newly created value. -// -// Unmarshal maps an XML element or attribute value to a bool by -// setting it to the boolean value represented by the string. -// -// Unmarshal maps an XML element or attribute value to an integer or -// floating-point field by setting the field to the result of -// interpreting the string value in decimal. There is no check for -// overflow. -// -// Unmarshal maps an XML element to an xml.Name by recording the -// element name. -// -// Unmarshal maps an XML element to a pointer by setting the pointer -// to a freshly allocated value and then mapping the element to that value. -// -func Unmarshal(data []byte, v interface{}) error { - return NewDecoder(bytes.NewReader(data)).Decode(v) -} - -// Decode works like xml.Unmarshal, except it reads the decoder -// stream to find the start element. -func (d *Decoder) Decode(v interface{}) error { - return d.DecodeElement(v, nil) -} - -// DecodeElement works like xml.Unmarshal except that it takes -// a pointer to the start XML element to decode into v. -// It is useful when a client reads some raw XML tokens itself -// but also wants to defer to Unmarshal for some elements. -func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { - val := reflect.ValueOf(v) - if val.Kind() != reflect.Ptr { - return errors.New("non-pointer passed to Unmarshal") - } - return d.unmarshal(val.Elem(), start) -} - -// An UnmarshalError represents an error in the unmarshalling process. 
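The mapping rules listed in the Unmarshal documentation above are easiest to see from the caller's side. Below is a minimal sketch against the public encoding/xml package, which this internal fork tracks; the Email/Addr types and the sample document are invented for illustration.

    package main

    import (
        "encoding/xml"
        "fmt"
    )

    // Invented types: the tags exercise ",attr", ",chardata" and the
    // "a>b" path syntax described in the rules above.
    type Addr struct {
        Value string `xml:",chardata"`
    }

    type Email struct {
        XMLName xml.Name `xml:"email"`
        Where   string   `xml:"where,attr"`
        Addr    Addr     `xml:"addr"`
        Groups  []string `xml:"group>value"`
    }

    func main() {
        data := []byte(`<email where="home">
            <addr>gopher@example.com</addr>
            <group><value>friends</value><value>family</value></group>
        </email>`)

        var e Email
        if err := xml.Unmarshal(data, &e); err != nil {
            fmt.Println("unmarshal failed:", err)
            return
        }
        fmt.Println(e.Where, e.Addr.Value, e.Groups)
        // home gopher@example.com [friends family]
    }
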
-type UnmarshalError string - -func (e UnmarshalError) Error() string { return string(e) } - -// Unmarshaler is the interface implemented by objects that can unmarshal -// an XML element description of themselves. -// -// UnmarshalXML decodes a single XML element -// beginning with the given start element. -// If it returns an error, the outer call to Unmarshal stops and -// returns that error. -// UnmarshalXML must consume exactly one XML element. -// One common implementation strategy is to unmarshal into -// a separate value with a layout matching the expected XML -// using d.DecodeElement, and then to copy the data from -// that value into the receiver. -// Another common strategy is to use d.Token to process the -// XML object one token at a time. -// UnmarshalXML may not use d.RawToken. -type Unmarshaler interface { - UnmarshalXML(d *Decoder, start StartElement) error -} - -// UnmarshalerAttr is the interface implemented by objects that can unmarshal -// an XML attribute description of themselves. -// -// UnmarshalXMLAttr decodes a single XML attribute. -// If it returns an error, the outer call to Unmarshal stops and -// returns that error. -// UnmarshalXMLAttr is used only for struct fields with the -// "attr" option in the field tag. -type UnmarshalerAttr interface { - UnmarshalXMLAttr(attr Attr) error -} - -// receiverType returns the receiver type to use in an expression like "%s.MethodName". -func receiverType(val interface{}) string { - t := reflect.TypeOf(val) - if t.Name() != "" { - return t.String() - } - return "(" + t.String() + ")" -} - -// unmarshalInterface unmarshals a single XML element into val. -// start is the opening tag of the element. -func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { - // Record that decoder must stop at end tag corresponding to start. - p.pushEOF() - - p.unmarshalDepth++ - err := val.UnmarshalXML(p, *start) - p.unmarshalDepth-- - if err != nil { - p.popEOF() - return err - } - - if !p.popEOF() { - return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) - } - - return nil -} - -// unmarshalTextInterface unmarshals a single XML element into val. -// The chardata contained in the element (but not its children) -// is passed to the text unmarshaler. -func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { - var buf []byte - depth := 1 - for depth > 0 { - t, err := p.Token() - if err != nil { - return err - } - switch t := t.(type) { - case CharData: - if depth == 1 { - buf = append(buf, t...) - } - case StartElement: - depth++ - case EndElement: - depth-- - } - } - return val.UnmarshalText(buf) -} - -// unmarshalAttr unmarshals a single XML attribute into val. -func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { - if val.Kind() == reflect.Ptr { - if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) - } - val = val.Elem() - } - - if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { - // This is an unmarshaler with a non-pointer receiver, - // so it's likely to be incorrect, but we do what we're told. - return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) - } - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { - return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) - } - } - - // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. 
- if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { - // This is an unmarshaler with a non-pointer receiver, - // so it's likely to be incorrect, but we do what we're told. - return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) - } - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) - } - } - - copyValue(val, []byte(attr.Value)) - return nil -} - -var ( - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() - unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() - textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -) - -// Unmarshal a single XML element into val. -func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { - // Find start element if we need it. - if start == nil { - for { - tok, err := p.Token() - if err != nil { - return err - } - if t, ok := tok.(StartElement); ok { - start = &t - break - } - } - } - - // Load value from interface, but only if the result will be - // usefully addressable. - if val.Kind() == reflect.Interface && !val.IsNil() { - e := val.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() { - val = e - } - } - - if val.Kind() == reflect.Ptr { - if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) - } - val = val.Elem() - } - - if val.CanInterface() && val.Type().Implements(unmarshalerType) { - // This is an unmarshaler with a non-pointer receiver, - // so it's likely to be incorrect, but we do what we're told. - return p.unmarshalInterface(val.Interface().(Unmarshaler), start) - } - - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { - return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) - } - } - - if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { - return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) - } - - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) - } - } - - var ( - data []byte - saveData reflect.Value - comment []byte - saveComment reflect.Value - saveXML reflect.Value - saveXMLIndex int - saveXMLData []byte - saveAny reflect.Value - sv reflect.Value - tinfo *typeInfo - err error - ) - - switch v := val; v.Kind() { - default: - return errors.New("unknown type " + v.Type().String()) - - case reflect.Interface: - // TODO: For now, simply ignore the field. In the near - // future we may choose to unmarshal the start - // element on it, if not nil. - return p.Skip() - - case reflect.Slice: - typ := v.Type() - if typ.Elem().Kind() == reflect.Uint8 { - // []byte - saveData = v - break - } - - // Slice of element values. - // Grow slice. - n := v.Len() - if n >= v.Cap() { - ncap := 2 * n - if ncap < 4 { - ncap = 4 - } - new := reflect.MakeSlice(typ, n, ncap) - reflect.Copy(new, v) - v.Set(new) - } - v.SetLen(n + 1) - - // Recur to read element into slice. 
- if err := p.unmarshal(v.Index(n), start); err != nil { - v.SetLen(n) - return err - } - return nil - - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: - saveData = v - - case reflect.Struct: - typ := v.Type() - if typ == nameType { - v.Set(reflect.ValueOf(start.Name)) - break - } - - sv = v - tinfo, err = getTypeInfo(typ) - if err != nil { - return err - } - - // Validate and assign element name. - if tinfo.xmlname != nil { - finfo := tinfo.xmlname - if finfo.name != "" && finfo.name != start.Name.Local { - return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") - } - if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { - e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " - if start.Name.Space == "" { - e += "no name space" - } else { - e += start.Name.Space - } - return UnmarshalError(e) - } - fv := finfo.value(sv) - if _, ok := fv.Interface().(Name); ok { - fv.Set(reflect.ValueOf(start.Name)) - } - } - - // Assign attributes. - // Also, determine whether we need to save character data or comments. - for i := range tinfo.fields { - finfo := &tinfo.fields[i] - switch finfo.flags & fMode { - case fAttr: - strv := finfo.value(sv) - // Look for attribute. - for _, a := range start.Attr { - if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { - if err := p.unmarshalAttr(strv, a); err != nil { - return err - } - break - } - } - - case fCharData: - if !saveData.IsValid() { - saveData = finfo.value(sv) - } - - case fComment: - if !saveComment.IsValid() { - saveComment = finfo.value(sv) - } - - case fAny, fAny | fElement: - if !saveAny.IsValid() { - saveAny = finfo.value(sv) - } - - case fInnerXml: - if !saveXML.IsValid() { - saveXML = finfo.value(sv) - if p.saved == nil { - saveXMLIndex = 0 - p.saved = new(bytes.Buffer) - } else { - saveXMLIndex = p.savedOffset() - } - } - } - } - } - - // Find end element. - // Process sub-elements along the way. -Loop: - for { - var savedOffset int - if saveXML.IsValid() { - savedOffset = p.savedOffset() - } - tok, err := p.Token() - if err != nil { - return err - } - switch t := tok.(type) { - case StartElement: - consumed := false - if sv.IsValid() { - consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) - if err != nil { - return err - } - if !consumed && saveAny.IsValid() { - consumed = true - if err := p.unmarshal(saveAny, &t); err != nil { - return err - } - } - } - if !consumed { - if err := p.Skip(); err != nil { - return err - } - } - - case EndElement: - if saveXML.IsValid() { - saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] - if saveXMLIndex == 0 { - p.saved = nil - } - } - break Loop - - case CharData: - if saveData.IsValid() { - data = append(data, t...) - } - - case Comment: - if saveComment.IsValid() { - comment = append(comment, t...) 
- } - } - } - - if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { - if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return err - } - saveData = reflect.Value{} - } - - if saveData.IsValid() && saveData.CanAddr() { - pv := saveData.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return err - } - saveData = reflect.Value{} - } - } - - if err := copyValue(saveData, data); err != nil { - return err - } - - switch t := saveComment; t.Kind() { - case reflect.String: - t.SetString(string(comment)) - case reflect.Slice: - t.Set(reflect.ValueOf(comment)) - } - - switch t := saveXML; t.Kind() { - case reflect.String: - t.SetString(string(saveXMLData)) - case reflect.Slice: - t.Set(reflect.ValueOf(saveXMLData)) - } - - return nil -} - -func copyValue(dst reflect.Value, src []byte) (err error) { - dst0 := dst - - if dst.Kind() == reflect.Ptr { - if dst.IsNil() { - dst.Set(reflect.New(dst.Type().Elem())) - } - dst = dst.Elem() - } - - // Save accumulated data. - switch dst.Kind() { - case reflect.Invalid: - // Probably a comment. - default: - return errors.New("cannot unmarshal into " + dst0.Type().String()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) - if err != nil { - return err - } - dst.SetInt(itmp) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) - if err != nil { - return err - } - dst.SetUint(utmp) - case reflect.Float32, reflect.Float64: - ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) - if err != nil { - return err - } - dst.SetFloat(ftmp) - case reflect.Bool: - value, err := strconv.ParseBool(strings.TrimSpace(string(src))) - if err != nil { - return err - } - dst.SetBool(value) - case reflect.String: - dst.SetString(string(src)) - case reflect.Slice: - if len(src) == 0 { - // non-nil to flag presence - src = []byte{} - } - dst.SetBytes(src) - } - return nil -} - -// unmarshalPath walks down an XML structure looking for wanted -// paths, and calls unmarshal on them. -// The consumed result tells whether XML elements have been consumed -// from the Decoder until start's matching end element, or if it's -// still untouched because start is uninteresting for sv's fields. -func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { - recurse := false -Loop: - for i := range tinfo.fields { - finfo := &tinfo.fields[i] - if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { - continue - } - for j := range parents { - if parents[j] != finfo.parents[j] { - continue Loop - } - } - if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { - // It's a perfect match, unmarshal the field. - return true, p.unmarshal(finfo.value(sv), start) - } - if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { - // It's a prefix for the field. Break and recurse - // since it's not ok for one field path to be itself - // the prefix for another field path. - recurse = true - - // We can reuse the same slice as long as we - // don't try to append to it. 
- parents = finfo.parents[:len(parents)+1] - break - } - } - if !recurse { - // We have no business with this element. - return false, nil - } - // The element is not a perfect match for any field, but one - // or more fields have the path to this element as a parent - // prefix. Recurse and attempt to match these. - for { - var tok Token - tok, err = p.Token() - if err != nil { - return true, err - } - switch t := tok.(type) { - case StartElement: - consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) - if err != nil { - return true, err - } - if !consumed2 { - if err := p.Skip(); err != nil { - return true, err - } - } - case EndElement: - return true, nil - } - } -} - -// Skip reads tokens until it has consumed the end element -// matching the most recent start element already consumed. -// It recurs if it encounters a start element, so it can be used to -// skip nested structures. -// It returns nil if it finds an end element matching the start -// element; otherwise it returns an error describing the problem. -func (d *Decoder) Skip() error { - for { - tok, err := d.Token() - if err != nil { - return err - } - switch tok.(type) { - case StartElement: - if err := d.Skip(); err != nil { - return err - } - case EndElement: - return nil - } - } -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go deleted file mode 100644 index 02f1e10c..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go +++ /dev/null @@ -1,744 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xml - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" - "testing" - "time" -) - -// Stripped down Atom feed data structures. - -func TestUnmarshalFeed(t *testing.T) { - var f Feed - if err := Unmarshal([]byte(atomFeedString), &f); err != nil { - t.Fatalf("Unmarshal: %s", err) - } - if !reflect.DeepEqual(f, atomFeed) { - t.Fatalf("have %#v\nwant %#v", f, atomFeed) - } -} - -// hget http://codereview.appspot.com/rss/mine/rsc -const atomFeedString = ` - -Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub -2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a - An attempt at adding pubsubhubbub support to Rietveld. -http://code.google.com/p/pubsubhubbub -http://code.google.com/p/rietveld/issues/detail?id=155 - -The server side of the protocol is trivial: - 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all - feeds that will be pubsubhubbubbed. - 2. every time one of those feeds changes, tell the hub - with a simple POST request. - -I have tested this by adding debug prints to a local hub -server and checking that the server got the right publish -requests. - -I can&#39;t quite get the server to work, but I think the bug -is not in my code. I think that the server expects to be -able to grab the feed and see the feed&#39;s actual URL in -the link rel=&quot;self&quot;, but the default value for that drops -the :port from the URL, and I cannot for the life of me -figure out how to get the Atom generator deep inside -django not to do that, or even where it is doing that, -or even what code is running to generate the Atom feed. -(I thought I knew but I added some assert False statements -and it kept running!) 
- -Ignoring that particular problem, I would appreciate -feedback on the right way to get the two values at -the top of feeds.py marked NOTE(rsc). - - -rietveld: correct tab handling -2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a - This fixes the buggy tab rendering that can be seen at -http://codereview.appspot.com/116075/diff/1/2 - -The fundamental problem was that the tab code was -not being told what column the text began in, so it -didn&#39;t know where to put the tab stops. Another problem -was that some of the code assumed that string byte -offsets were the same as column offsets, which is only -true if there are no tabs. - -In the process of fixing this, I cleaned up the arguments -to Fold and ExpandTabs and renamed them Break and -_ExpandTabs so that I could be sure that I found all the -call sites. I also wanted to verify that ExpandTabs was -not being used from outside intra_region_diff.py. - - - ` - -type Feed struct { - XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` - Title string `xml:"title"` - Id string `xml:"id"` - Link []Link `xml:"link"` - Updated time.Time `xml:"updated,attr"` - Author Person `xml:"author"` - Entry []Entry `xml:"entry"` -} - -type Entry struct { - Title string `xml:"title"` - Id string `xml:"id"` - Link []Link `xml:"link"` - Updated time.Time `xml:"updated"` - Author Person `xml:"author"` - Summary Text `xml:"summary"` -} - -type Link struct { - Rel string `xml:"rel,attr,omitempty"` - Href string `xml:"href,attr"` -} - -type Person struct { - Name string `xml:"name"` - URI string `xml:"uri"` - Email string `xml:"email"` - InnerXML string `xml:",innerxml"` -} - -type Text struct { - Type string `xml:"type,attr,omitempty"` - Body string `xml:",chardata"` -} - -var atomFeed = Feed{ - XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, - Title: "Code Review - My issues", - Link: []Link{ - {Rel: "alternate", Href: "http://codereview.appspot.com/"}, - {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, - }, - Id: "http://codereview.appspot.com/", - Updated: ParseTime("2009-10-04T01:35:58+00:00"), - Author: Person{ - Name: "rietveld<>", - InnerXML: "rietveld<>", - }, - Entry: []Entry{ - { - Title: "rietveld: an attempt at pubsubhubbub\n", - Link: []Link{ - {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, - }, - Updated: ParseTime("2009-10-04T01:35:58+00:00"), - Author: Person{ - Name: "email-address-removed", - InnerXML: "email-address-removed", - }, - Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", - Summary: Text{ - Type: "html", - Body: ` - An attempt at adding pubsubhubbub support to Rietveld. -http://code.google.com/p/pubsubhubbub -http://code.google.com/p/rietveld/issues/detail?id=155 - -The server side of the protocol is trivial: - 1. add a <link rel="hub" href="hub-server"> tag to all - feeds that will be pubsubhubbubbed. - 2. every time one of those feeds changes, tell the hub - with a simple POST request. - -I have tested this by adding debug prints to a local hub -server and checking that the server got the right publish -requests. - -I can't quite get the server to work, but I think the bug -is not in my code. 
I think that the server expects to be -able to grab the feed and see the feed's actual URL in -the link rel="self", but the default value for that drops -the :port from the URL, and I cannot for the life of me -figure out how to get the Atom generator deep inside -django not to do that, or even where it is doing that, -or even what code is running to generate the Atom feed. -(I thought I knew but I added some assert False statements -and it kept running!) - -Ignoring that particular problem, I would appreciate -feedback on the right way to get the two values at -the top of feeds.py marked NOTE(rsc). - - -`, - }, - }, - { - Title: "rietveld: correct tab handling\n", - Link: []Link{ - {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, - }, - Updated: ParseTime("2009-10-03T23:02:17+00:00"), - Author: Person{ - Name: "email-address-removed", - InnerXML: "email-address-removed", - }, - Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", - Summary: Text{ - Type: "html", - Body: ` - This fixes the buggy tab rendering that can be seen at -http://codereview.appspot.com/116075/diff/1/2 - -The fundamental problem was that the tab code was -not being told what column the text began in, so it -didn't know where to put the tab stops. Another problem -was that some of the code assumed that string byte -offsets were the same as column offsets, which is only -true if there are no tabs. - -In the process of fixing this, I cleaned up the arguments -to Fold and ExpandTabs and renamed them Break and -_ExpandTabs so that I could be sure that I found all the -call sites. I also wanted to verify that ExpandTabs was -not being used from outside intra_region_diff.py. - - -`, - }, - }, - }, -} - -const pathTestString = ` - - 1 - - - A - - - B - - - C - D - - <_> - E - - - 2 - -` - -type PathTestItem struct { - Value string -} - -type PathTestA struct { - Items []PathTestItem `xml:">Item1"` - Before, After string -} - -type PathTestB struct { - Other []PathTestItem `xml:"Items>Item1"` - Before, After string -} - -type PathTestC struct { - Values1 []string `xml:"Items>Item1>Value"` - Values2 []string `xml:"Items>Item2>Value"` - Before, After string -} - -type PathTestSet struct { - Item1 []PathTestItem -} - -type PathTestD struct { - Other PathTestSet `xml:"Items"` - Before, After string -} - -type PathTestE struct { - Underline string `xml:"Items>_>Value"` - Before, After string -} - -var pathTests = []interface{}{ - &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, - &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, - &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, - &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, - &PathTestE{Underline: "E", Before: "1", After: "2"}, -} - -func TestUnmarshalPaths(t *testing.T) { - for _, pt := range pathTests { - v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() - if err := Unmarshal([]byte(pathTestString), v); err != nil { - t.Fatalf("Unmarshal: %s", err) - } - if !reflect.DeepEqual(v, pt) { - t.Fatalf("have %#v\nwant %#v", v, pt) - } - } -} - -type BadPathTestA struct { - First string `xml:"items>item1"` - Other string `xml:"items>item2"` - Second string `xml:"items"` -} - -type BadPathTestB struct { - Other string `xml:"items>item2>value"` - First string `xml:"items>item1"` - Second string `xml:"items>item1>value"` -} - -type BadPathTestC struct { - First string - Second string `xml:"First"` -} - -type BadPathTestD struct { 
- BadPathEmbeddedA - BadPathEmbeddedB -} - -type BadPathEmbeddedA struct { - First string -} - -type BadPathEmbeddedB struct { - Second string `xml:"First"` -} - -var badPathTests = []struct { - v, e interface{} -}{ - {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, - {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, - {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, - {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, -} - -func TestUnmarshalBadPaths(t *testing.T) { - for _, tt := range badPathTests { - err := Unmarshal([]byte(pathTestString), tt.v) - if !reflect.DeepEqual(err, tt.e) { - t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) - } - } -} - -const OK = "OK" -const withoutNameTypeData = ` - -` - -type TestThree struct { - XMLName Name `xml:"Test3"` - Attr string `xml:",attr"` -} - -func TestUnmarshalWithoutNameType(t *testing.T) { - var x TestThree - if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { - t.Fatalf("Unmarshal: %s", err) - } - if x.Attr != OK { - t.Fatalf("have %v\nwant %v", x.Attr, OK) - } -} - -func TestUnmarshalAttr(t *testing.T) { - type ParamVal struct { - Int int `xml:"int,attr"` - } - - type ParamPtr struct { - Int *int `xml:"int,attr"` - } - - type ParamStringPtr struct { - Int *string `xml:"int,attr"` - } - - x := []byte(``) - - p1 := &ParamPtr{} - if err := Unmarshal(x, p1); err != nil { - t.Fatalf("Unmarshal: %s", err) - } - if p1.Int == nil { - t.Fatalf("Unmarshal failed in to *int field") - } else if *p1.Int != 1 { - t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) - } - - p2 := &ParamVal{} - if err := Unmarshal(x, p2); err != nil { - t.Fatalf("Unmarshal: %s", err) - } - if p2.Int != 1 { - t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) - } - - p3 := &ParamStringPtr{} - if err := Unmarshal(x, p3); err != nil { - t.Fatalf("Unmarshal: %s", err) - } - if p3.Int == nil { - t.Fatalf("Unmarshal failed in to *string field") - } else if *p3.Int != "1" { - t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) - } -} - -type Tables struct { - HTable string `xml:"http://www.w3.org/TR/html4/ table"` - FTable string `xml:"http://www.w3schools.com/furniture table"` -} - -var tables = []struct { - xml string - tab Tables - ns string -}{ - { - xml: `` + - `hello
    ` + - `world
    ` + - `
    `, - tab: Tables{"hello", "world"}, - }, - { - xml: `` + - `world
    ` + - `hello
    ` + - `
    `, - tab: Tables{"hello", "world"}, - }, - { - xml: `` + - `world` + - `hello` + - ``, - tab: Tables{"hello", "world"}, - }, - { - xml: `` + - `bogus
    ` + - `
    `, - tab: Tables{}, - }, - { - xml: `` + - `only
    ` + - `
    `, - tab: Tables{HTable: "only"}, - ns: "http://www.w3.org/TR/html4/", - }, - { - xml: `` + - `only
    ` + - `
    `, - tab: Tables{FTable: "only"}, - ns: "http://www.w3schools.com/furniture", - }, - { - xml: `` + - `only
    ` + - `
    `, - tab: Tables{}, - ns: "something else entirely", - }, -} - -func TestUnmarshalNS(t *testing.T) { - for i, tt := range tables { - var dst Tables - var err error - if tt.ns != "" { - d := NewDecoder(strings.NewReader(tt.xml)) - d.DefaultSpace = tt.ns - err = d.Decode(&dst) - } else { - err = Unmarshal([]byte(tt.xml), &dst) - } - if err != nil { - t.Errorf("#%d: Unmarshal: %v", i, err) - continue - } - want := tt.tab - if dst != want { - t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) - } - } -} - -func TestRoundTrip(t *testing.T) { - // From issue 7535 - const s = `` - in := bytes.NewBufferString(s) - for i := 0; i < 10; i++ { - out := &bytes.Buffer{} - d := NewDecoder(in) - e := NewEncoder(out) - - for { - t, err := d.Token() - if err == io.EOF { - break - } - if err != nil { - fmt.Println("failed:", err) - return - } - e.EncodeToken(t) - } - e.Flush() - in = out - } - if got := in.String(); got != s { - t.Errorf("have: %q\nwant: %q\n", got, s) - } -} - -func TestMarshalNS(t *testing.T) { - dst := Tables{"hello", "world"} - data, err := Marshal(&dst) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - want := `hello
    world
    ` - str := string(data) - if str != want { - t.Errorf("have: %q\nwant: %q\n", str, want) - } -} - -type TableAttrs struct { - TAttr TAttr -} - -type TAttr struct { - HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` - FTable string `xml:"http://www.w3schools.com/furniture table,attr"` - Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` - Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` - Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` - Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` - Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` -} - -var tableAttrs = []struct { - xml string - tab TableAttrs - ns string -}{ - { - xml: ``, - tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, - }, - { - xml: ``, - tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, - }, - { - xml: ``, - tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, - }, - { - // Default space does not apply to attribute names. - xml: ``, - tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, - }, - { - // Default space does not apply to attribute names. - xml: ``, - tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, - }, - { - xml: ``, - tab: TableAttrs{}, - }, - { - // Default space does not apply to attribute names. - xml: ``, - tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, - ns: "http://www.w3schools.com/furniture", - }, - { - // Default space does not apply to attribute names. - xml: ``, - tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, - ns: "http://www.w3.org/TR/html4/", - }, - { - xml: ``, - tab: TableAttrs{}, - ns: "something else entirely", - }, -} - -func TestUnmarshalNSAttr(t *testing.T) { - for i, tt := range tableAttrs { - var dst TableAttrs - var err error - if tt.ns != "" { - d := NewDecoder(strings.NewReader(tt.xml)) - d.DefaultSpace = tt.ns - err = d.Decode(&dst) - } else { - err = Unmarshal([]byte(tt.xml), &dst) - } - if err != nil { - t.Errorf("#%d: Unmarshal: %v", i, err) - continue - } - want := tt.tab - if dst != want { - t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) - } - } -} - -func TestMarshalNSAttr(t *testing.T) { - src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} - data, err := Marshal(&src) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - want := `` - str := string(data) - if str != want { - t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) - } - - var dst TableAttrs - if err := Unmarshal(data, &dst); err != nil { - t.Errorf("Unmarshal: %v", err) - } - - if dst != src { - t.Errorf("Unmarshal = %q, want %q", dst, src) - } -} - -type MyCharData struct { - body string -} - -func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { - for { - t, err := d.Token() - if err == io.EOF { // found end of element - break - } - if err != nil { - return err - } - if char, ok := t.(CharData); ok { - m.body += string(char) - } - } - return nil -} - -var _ Unmarshaler = (*MyCharData)(nil) - -func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { - panic("must not call") -} - -type MyAttr struct { - attr string -} - -func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { - m.attr = attr.Value - return nil -} - -var _ UnmarshalerAttr = (*MyAttr)(nil) - -type MyStruct struct { - Data *MyCharData - Attr *MyAttr `xml:",attr"` - - Data2 MyCharData - Attr2 MyAttr `xml:",attr"` -} - -func TestUnmarshaler(t *testing.T) { - xml := ` - - hello world - howdy world - - ` - - var m MyStruct - if err := 
Unmarshal([]byte(xml), &m); err != nil { - t.Fatal(err) - } - - if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { - t.Errorf("m=%#+v\n", m) - } -} - -type Pea struct { - Cotelydon string -} - -type Pod struct { - Pea interface{} `xml:"Pea"` -} - -// https://golang.org/issue/6836 -func TestUnmarshalIntoInterface(t *testing.T) { - pod := new(Pod) - pod.Pea = new(Pea) - xml := `Green stuff` - err := Unmarshal([]byte(xml), pod) - if err != nil { - t.Fatalf("failed to unmarshal %q: %v", xml, err) - } - pea, ok := pod.Pea.(*Pea) - if !ok { - t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) - } - have, want := pea.Cotelydon, "Green stuff" - if have != want { - t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) - } -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go deleted file mode 100644 index fdde288b..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xml - -import ( - "fmt" - "reflect" - "strings" - "sync" -) - -// typeInfo holds details for the xml representation of a type. -type typeInfo struct { - xmlname *fieldInfo - fields []fieldInfo -} - -// fieldInfo holds details for the xml representation of a single field. -type fieldInfo struct { - idx []int - name string - xmlns string - flags fieldFlags - parents []string -} - -type fieldFlags int - -const ( - fElement fieldFlags = 1 << iota - fAttr - fCharData - fInnerXml - fComment - fAny - - fOmitEmpty - - fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny -) - -var tinfoMap = make(map[reflect.Type]*typeInfo) -var tinfoLock sync.RWMutex - -var nameType = reflect.TypeOf(Name{}) - -// getTypeInfo returns the typeInfo structure with details necessary -// for marshalling and unmarshalling typ. -func getTypeInfo(typ reflect.Type) (*typeInfo, error) { - tinfoLock.RLock() - tinfo, ok := tinfoMap[typ] - tinfoLock.RUnlock() - if ok { - return tinfo, nil - } - tinfo = &typeInfo{} - if typ.Kind() == reflect.Struct && typ != nameType { - n := typ.NumField() - for i := 0; i < n; i++ { - f := typ.Field(i) - if f.PkgPath != "" || f.Tag.Get("xml") == "-" { - continue // Private field - } - - // For embedded structs, embed its fields. - if f.Anonymous { - t := f.Type - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Kind() == reflect.Struct { - inner, err := getTypeInfo(t) - if err != nil { - return nil, err - } - if tinfo.xmlname == nil { - tinfo.xmlname = inner.xmlname - } - for _, finfo := range inner.fields { - finfo.idx = append([]int{i}, finfo.idx...) - if err := addFieldInfo(typ, tinfo, &finfo); err != nil { - return nil, err - } - } - continue - } - } - - finfo, err := structFieldInfo(typ, &f) - if err != nil { - return nil, err - } - - if f.Name == "XMLName" { - tinfo.xmlname = finfo - continue - } - - // Add the field if it doesn't conflict with other fields. - if err := addFieldInfo(typ, tinfo, finfo); err != nil { - return nil, err - } - } - } - tinfoLock.Lock() - tinfoMap[typ] = tinfo - tinfoLock.Unlock() - return tinfo, nil -} - -// structFieldInfo builds and returns a fieldInfo for f. 
-func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { - finfo := &fieldInfo{idx: f.Index} - - // Split the tag from the xml namespace if necessary. - tag := f.Tag.Get("xml") - if i := strings.Index(tag, " "); i >= 0 { - finfo.xmlns, tag = tag[:i], tag[i+1:] - } - - // Parse flags. - tokens := strings.Split(tag, ",") - if len(tokens) == 1 { - finfo.flags = fElement - } else { - tag = tokens[0] - for _, flag := range tokens[1:] { - switch flag { - case "attr": - finfo.flags |= fAttr - case "chardata": - finfo.flags |= fCharData - case "innerxml": - finfo.flags |= fInnerXml - case "comment": - finfo.flags |= fComment - case "any": - finfo.flags |= fAny - case "omitempty": - finfo.flags |= fOmitEmpty - } - } - - // Validate the flags used. - valid := true - switch mode := finfo.flags & fMode; mode { - case 0: - finfo.flags |= fElement - case fAttr, fCharData, fInnerXml, fComment, fAny: - if f.Name == "XMLName" || tag != "" && mode != fAttr { - valid = false - } - default: - // This will also catch multiple modes in a single field. - valid = false - } - if finfo.flags&fMode == fAny { - finfo.flags |= fElement - } - if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { - valid = false - } - if !valid { - return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", - f.Name, typ, f.Tag.Get("xml")) - } - } - - // Use of xmlns without a name is not allowed. - if finfo.xmlns != "" && tag == "" { - return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", - f.Name, typ, f.Tag.Get("xml")) - } - - if f.Name == "XMLName" { - // The XMLName field records the XML element name. Don't - // process it as usual because its name should default to - // empty rather than to the field name. - finfo.name = tag - return finfo, nil - } - - if tag == "" { - // If the name part of the tag is completely empty, get - // default from XMLName of underlying struct if feasible, - // or field name otherwise. - if xmlname := lookupXMLName(f.Type); xmlname != nil { - finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name - } else { - finfo.name = f.Name - } - return finfo, nil - } - - if finfo.xmlns == "" && finfo.flags&fAttr == 0 { - // If it's an element no namespace specified, get the default - // from the XMLName of enclosing struct if possible. - if xmlname := lookupXMLName(typ); xmlname != nil { - finfo.xmlns = xmlname.xmlns - } - } - - // Prepare field name and parents. - parents := strings.Split(tag, ">") - if parents[0] == "" { - parents[0] = f.Name - } - if parents[len(parents)-1] == "" { - return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) - } - finfo.name = parents[len(parents)-1] - if len(parents) > 1 { - if (finfo.flags & fElement) == 0 { - return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) - } - finfo.parents = parents[:len(parents)-1] - } - - // If the field type has an XMLName field, the names must match - // so that the behavior of both marshalling and unmarshalling - // is straightforward and unambiguous. - if finfo.flags&fElement != 0 { - ftyp := f.Type - xmlname := lookupXMLName(ftyp) - if xmlname != nil && xmlname.name != finfo.name { - return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", - finfo.name, typ, f.Name, xmlname.name, ftyp) - } - } - return finfo, nil -} - -// lookupXMLName returns the fieldInfo for typ's XMLName field -// in case it exists and has a valid xml field tag, otherwise -// it returns nil. 
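structFieldInfo above splits a tag of the form "xmlns name,flag1,flag2" into a name space, a name and mode flags. The following is a hedged sketch of what such tags look like from the caller's side, again using the public encoding/xml package with invented names; the exact marshalled output may differ slightly between Go versions.

    package main

    import (
        "encoding/xml"
        "fmt"
        "log"
    )

    // Invented type: the XMLName tag carries a name space URL before the
    // element name, and Note combines the ",attr" and ",omitempty" flags.
    type Item struct {
        XMLName xml.Name `xml:"http://example.com/ns item"`
        Note    string   `xml:"note,attr,omitempty"`
        Body    string   `xml:",chardata"`
    }

    func main() {
        out, err := xml.Marshal(Item{Note: "n1", Body: "payload"})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(out))
        // Roughly: <item xmlns="http://example.com/ns" note="n1">payload</item>

        out, err = xml.Marshal(Item{Body: "payload"})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(out)) // the empty Note attribute is omitted
    }
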
-func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { - for typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - if typ.Kind() != reflect.Struct { - return nil - } - for i, n := 0, typ.NumField(); i < n; i++ { - f := typ.Field(i) - if f.Name != "XMLName" { - continue - } - finfo, err := structFieldInfo(typ, &f) - if finfo.name != "" && err == nil { - return finfo - } - // Also consider errors as a non-existent field tag - // and let getTypeInfo itself report the error. - break - } - return nil -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -// addFieldInfo adds finfo to tinfo.fields if there are no -// conflicts, or if conflicts arise from previous fields that were -// obtained from deeper embedded structures than finfo. In the latter -// case, the conflicting entries are dropped. -// A conflict occurs when the path (parent + name) to a field is -// itself a prefix of another path, or when two paths match exactly. -// It is okay for field paths to share a common, shorter prefix. -func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { - var conflicts []int -Loop: - // First, figure all conflicts. Most working code will have none. - for i := range tinfo.fields { - oldf := &tinfo.fields[i] - if oldf.flags&fMode != newf.flags&fMode { - continue - } - if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { - continue - } - minl := min(len(newf.parents), len(oldf.parents)) - for p := 0; p < minl; p++ { - if oldf.parents[p] != newf.parents[p] { - continue Loop - } - } - if len(oldf.parents) > len(newf.parents) { - if oldf.parents[len(newf.parents)] == newf.name { - conflicts = append(conflicts, i) - } - } else if len(oldf.parents) < len(newf.parents) { - if newf.parents[len(oldf.parents)] == oldf.name { - conflicts = append(conflicts, i) - } - } else { - if newf.name == oldf.name { - conflicts = append(conflicts, i) - } - } - } - // Without conflicts, add the new field and return. - if conflicts == nil { - tinfo.fields = append(tinfo.fields, *newf) - return nil - } - - // If any conflict is shallower, ignore the new field. - // This matches the Go field resolution on embedding. - for _, i := range conflicts { - if len(tinfo.fields[i].idx) < len(newf.idx) { - return nil - } - } - - // Otherwise, if any of them is at the same depth level, it's an error. - for _, i := range conflicts { - oldf := &tinfo.fields[i] - if len(oldf.idx) == len(newf.idx) { - f1 := typ.FieldByIndex(oldf.idx) - f2 := typ.FieldByIndex(newf.idx) - return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} - } - } - - // Otherwise, the new field is shallower, and thus takes precedence, - // so drop the conflicting fields from tinfo and append the new one. - for c := len(conflicts) - 1; c >= 0; c-- { - i := conflicts[c] - copy(tinfo.fields[i:], tinfo.fields[i+1:]) - tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] - } - tinfo.fields = append(tinfo.fields, *newf) - return nil -} - -// A TagPathError represents an error in the unmarshalling process -// caused by the use of field tags with conflicting paths. -type TagPathError struct { - Struct reflect.Type - Field1, Tag1 string - Field2, Tag2 string -} - -func (e *TagPathError) Error() string { - return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) -} - -// value returns v's field value corresponding to finfo. 
-// It's equivalent to v.FieldByIndex(finfo.idx), but initializes -// and dereferences pointers as necessary. -func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { - for i, x := range finfo.idx { - if i > 0 { - t := v.Type() - if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - } - v = v.Field(x) - } - return v -} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go deleted file mode 100644 index 5b79cbec..00000000 --- a/vendor/golang.org/x/net/webdav/internal/xml/xml.go +++ /dev/null @@ -1,1998 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xml implements a simple XML 1.0 parser that -// understands XML name spaces. -package xml - -// References: -// Annotated XML spec: http://www.xml.com/axml/testaxml.htm -// XML name spaces: http://www.w3.org/TR/REC-xml-names/ - -// TODO(rsc): -// Test error handling. - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// A SyntaxError represents a syntax error in the XML input stream. -type SyntaxError struct { - Msg string - Line int -} - -func (e *SyntaxError) Error() string { - return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg -} - -// A Name represents an XML name (Local) annotated with a name space -// identifier (Space). In tokens returned by Decoder.Token, the Space -// identifier is given as a canonical URL, not the short prefix used in -// the document being parsed. -// -// As a special case, XML namespace declarations will use the literal -// string "xmlns" for the Space field instead of the fully resolved URL. -// See Encoder.EncodeToken for more information on namespace encoding -// behaviour. -type Name struct { - Space, Local string -} - -// isNamespace reports whether the name is a namespace-defining name. -func (name Name) isNamespace() bool { - return name.Local == "xmlns" || name.Space == "xmlns" -} - -// An Attr represents an attribute in an XML element (Name=Value). -type Attr struct { - Name Name - Value string -} - -// A Token is an interface holding one of the token types: -// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. -type Token interface{} - -// A StartElement represents an XML start element. -type StartElement struct { - Name Name - Attr []Attr -} - -func (e StartElement) Copy() StartElement { - attrs := make([]Attr, len(e.Attr)) - copy(attrs, e.Attr) - e.Attr = attrs - return e -} - -// End returns the corresponding XML end element. -func (e StartElement) End() EndElement { - return EndElement{e.Name} -} - -// setDefaultNamespace sets the namespace of the element -// as the default for all elements contained within it. -func (e *StartElement) setDefaultNamespace() { - if e.Name.Space == "" { - // If there's no namespace on the element, don't - // set the default. Strictly speaking this might be wrong, as - // we can't tell if the element had no namespace set - // or was just using the default namespace. - return - } - // Don't add a default name space if there's already one set. 
- for _, attr := range e.Attr { - if attr.Name.Space == "" && attr.Name.Local == "xmlns" { - return - } - } - e.Attr = append(e.Attr, Attr{ - Name: Name{ - Local: "xmlns", - }, - Value: e.Name.Space, - }) -} - -// An EndElement represents an XML end element. -type EndElement struct { - Name Name -} - -// A CharData represents XML character data (raw text), -// in which XML escape sequences have been replaced by -// the characters they represent. -type CharData []byte - -func makeCopy(b []byte) []byte { - b1 := make([]byte, len(b)) - copy(b1, b) - return b1 -} - -func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } - -// A Comment represents an XML comment of the form . -// The bytes do not include the comment markers. -type Comment []byte - -func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } - -// A ProcInst represents an XML processing instruction of the form -type ProcInst struct { - Target string - Inst []byte -} - -func (p ProcInst) Copy() ProcInst { - p.Inst = makeCopy(p.Inst) - return p -} - -// A Directive represents an XML directive of the form . -// The bytes do not include the markers. -type Directive []byte - -func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } - -// CopyToken returns a copy of a Token. -func CopyToken(t Token) Token { - switch v := t.(type) { - case CharData: - return v.Copy() - case Comment: - return v.Copy() - case Directive: - return v.Copy() - case ProcInst: - return v.Copy() - case StartElement: - return v.Copy() - } - return t -} - -// A Decoder represents an XML parser reading a particular input stream. -// The parser assumes that its input is encoded in UTF-8. -type Decoder struct { - // Strict defaults to true, enforcing the requirements - // of the XML specification. - // If set to false, the parser allows input containing common - // mistakes: - // * If an element is missing an end tag, the parser invents - // end tags as necessary to keep the return values from Token - // properly balanced. - // * In attribute values and character data, unknown or malformed - // character entities (sequences beginning with &) are left alone. - // - // Setting: - // - // d.Strict = false; - // d.AutoClose = HTMLAutoClose; - // d.Entity = HTMLEntity - // - // creates a parser that can handle typical HTML. - // - // Strict mode does not enforce the requirements of the XML name spaces TR. - // In particular it does not reject name space tags using undefined prefixes. - // Such tags are recorded with the unknown prefix as the name space URL. - Strict bool - - // When Strict == false, AutoClose indicates a set of elements to - // consider closed immediately after they are opened, regardless - // of whether an end element is present. - AutoClose []string - - // Entity can be used to map non-standard entity names to string replacements. - // The parser behaves as if these standard mappings are present in the map, - // regardless of the actual map content: - // - // "lt": "<", - // "gt": ">", - // "amp": "&", - // "apos": "'", - // "quot": `"`, - Entity map[string]string - - // CharsetReader, if non-nil, defines a function to generate - // charset-conversion readers, converting from the provided - // non-UTF-8 charset into UTF-8. If CharsetReader is nil or - // returns an error, parsing stops with an error. One of the - // the CharsetReader's result values must be non-nil. 
- CharsetReader func(charset string, input io.Reader) (io.Reader, error) - - // DefaultSpace sets the default name space used for unadorned tags, - // as if the entire XML stream were wrapped in an element containing - // the attribute xmlns="DefaultSpace". - DefaultSpace string - - r io.ByteReader - buf bytes.Buffer - saved *bytes.Buffer - stk *stack - free *stack - needClose bool - toClose Name - nextToken Token - nextByte int - ns map[string]string - err error - line int - offset int64 - unmarshalDepth int -} - -// NewDecoder creates a new XML parser reading from r. -// If r does not implement io.ByteReader, NewDecoder will -// do its own buffering. -func NewDecoder(r io.Reader) *Decoder { - d := &Decoder{ - ns: make(map[string]string), - nextByte: -1, - line: 1, - Strict: true, - } - d.switchToReader(r) - return d -} - -// Token returns the next XML token in the input stream. -// At the end of the input stream, Token returns nil, io.EOF. -// -// Slices of bytes in the returned token data refer to the -// parser's internal buffer and remain valid only until the next -// call to Token. To acquire a copy of the bytes, call CopyToken -// or the token's Copy method. -// -// Token expands self-closing elements such as
    -// into separate start and end elements returned by successive calls. -// -// Token guarantees that the StartElement and EndElement -// tokens it returns are properly nested and matched: -// if Token encounters an unexpected end element, -// it will return an error. -// -// Token implements XML name spaces as described by -// http://www.w3.org/TR/REC-xml-names/. Each of the -// Name structures contained in the Token has the Space -// set to the URL identifying its name space when known. -// If Token encounters an unrecognized name space prefix, -// it uses the prefix as the Space rather than report an error. -func (d *Decoder) Token() (t Token, err error) { - if d.stk != nil && d.stk.kind == stkEOF { - err = io.EOF - return - } - if d.nextToken != nil { - t = d.nextToken - d.nextToken = nil - } else if t, err = d.rawToken(); err != nil { - return - } - - if !d.Strict { - if t1, ok := d.autoClose(t); ok { - d.nextToken = t - t = t1 - } - } - switch t1 := t.(type) { - case StartElement: - // In XML name spaces, the translations listed in the - // attributes apply to the element name and - // to the other attribute names, so process - // the translations first. - for _, a := range t1.Attr { - if a.Name.Space == "xmlns" { - v, ok := d.ns[a.Name.Local] - d.pushNs(a.Name.Local, v, ok) - d.ns[a.Name.Local] = a.Value - } - if a.Name.Space == "" && a.Name.Local == "xmlns" { - // Default space for untagged names - v, ok := d.ns[""] - d.pushNs("", v, ok) - d.ns[""] = a.Value - } - } - - d.translate(&t1.Name, true) - for i := range t1.Attr { - d.translate(&t1.Attr[i].Name, false) - } - d.pushElement(t1.Name) - t = t1 - - case EndElement: - d.translate(&t1.Name, true) - if !d.popElement(&t1) { - return nil, d.err - } - t = t1 - } - return -} - -const xmlURL = "http://www.w3.org/XML/1998/namespace" - -// Apply name space translation to name n. -// The default name space (for Space=="") -// applies only to element names, not to attribute names. -func (d *Decoder) translate(n *Name, isElementName bool) { - switch { - case n.Space == "xmlns": - return - case n.Space == "" && !isElementName: - return - case n.Space == "xml": - n.Space = xmlURL - case n.Space == "" && n.Local == "xmlns": - return - } - if v, ok := d.ns[n.Space]; ok { - n.Space = v - } else if n.Space == "" { - n.Space = d.DefaultSpace - } -} - -func (d *Decoder) switchToReader(r io.Reader) { - // Get efficient byte at a time reader. - // Assume that if reader has its own - // ReadByte, it's efficient enough. - // Otherwise, use bufio. - if rb, ok := r.(io.ByteReader); ok { - d.r = rb - } else { - d.r = bufio.NewReader(r) - } -} - -// Parsing state - stack holds old name space translations -// and the current set of open elements. The translations to pop when -// ending a given tag are *below* it on the stack, which is -// more work but forced on us by XML. -type stack struct { - next *stack - kind int - name Name - ok bool -} - -const ( - stkStart = iota - stkNs - stkEOF -) - -func (d *Decoder) push(kind int) *stack { - s := d.free - if s != nil { - d.free = s.next - } else { - s = new(stack) - } - s.next = d.stk - s.kind = kind - d.stk = s - return s -} - -func (d *Decoder) pop() *stack { - s := d.stk - if s != nil { - d.stk = s.next - s.next = d.free - d.free = s - } - return s -} - -// Record that after the current element is finished -// (that element is already pushed on the stack) -// Token should return EOF until popEOF is called. -func (d *Decoder) pushEOF() { - // Walk down stack to find Start. 
- // It might not be the top, because there might be stkNs - // entries above it. - start := d.stk - for start.kind != stkStart { - start = start.next - } - // The stkNs entries below a start are associated with that - // element too; skip over them. - for start.next != nil && start.next.kind == stkNs { - start = start.next - } - s := d.free - if s != nil { - d.free = s.next - } else { - s = new(stack) - } - s.kind = stkEOF - s.next = start.next - start.next = s -} - -// Undo a pushEOF. -// The element must have been finished, so the EOF should be at the top of the stack. -func (d *Decoder) popEOF() bool { - if d.stk == nil || d.stk.kind != stkEOF { - return false - } - d.pop() - return true -} - -// Record that we are starting an element with the given name. -func (d *Decoder) pushElement(name Name) { - s := d.push(stkStart) - s.name = name -} - -// Record that we are changing the value of ns[local]. -// The old value is url, ok. -func (d *Decoder) pushNs(local string, url string, ok bool) { - s := d.push(stkNs) - s.name.Local = local - s.name.Space = url - s.ok = ok -} - -// Creates a SyntaxError with the current line number. -func (d *Decoder) syntaxError(msg string) error { - return &SyntaxError{Msg: msg, Line: d.line} -} - -// Record that we are ending an element with the given name. -// The name must match the record at the top of the stack, -// which must be a pushElement record. -// After popping the element, apply any undo records from -// the stack to restore the name translations that existed -// before we saw this element. -func (d *Decoder) popElement(t *EndElement) bool { - s := d.pop() - name := t.Name - switch { - case s == nil || s.kind != stkStart: - d.err = d.syntaxError("unexpected end element ") - return false - case s.name.Local != name.Local: - if !d.Strict { - d.needClose = true - d.toClose = t.Name - t.Name = s.name - return true - } - d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") - return false - case s.name.Space != name.Space: - d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + - "closed by in space " + name.Space) - return false - } - - // Pop stack until a Start or EOF is on the top, undoing the - // translations that were associated with the element we just closed. - for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { - s := d.pop() - if s.ok { - d.ns[s.name.Local] = s.name.Space - } else { - delete(d.ns, s.name.Local) - } - } - - return true -} - -// If the top element on the stack is autoclosing and -// t is not the end tag, invent the end tag. -func (d *Decoder) autoClose(t Token) (Token, bool) { - if d.stk == nil || d.stk.kind != stkStart { - return nil, false - } - name := strings.ToLower(d.stk.name.Local) - for _, s := range d.AutoClose { - if strings.ToLower(s) == name { - // This one should be auto closed if t doesn't close it. - et, ok := t.(EndElement) - if !ok || et.Name.Local != name { - return EndElement{d.stk.name}, true - } - break - } - } - return nil, false -} - -var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") - -// RawToken is like Token but does not verify that -// start and end elements match and does not translate -// name space prefixes to their corresponding URLs. 
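The Decoder.Token documentation above stresses that CharData and Comment slices alias the decoder's internal buffer and must be copied before the next Token call. A minimal streaming sketch against the public encoding/xml package follows; the input document is invented for illustration.

    package main

    import (
        "encoding/xml"
        "fmt"
        "io"
        "log"
        "strings"
    )

    func main() {
        dec := xml.NewDecoder(strings.NewReader(`<doc><a>1</a><b>2</b></doc>`))
        for {
            tok, err := dec.Token()
            if err == io.EOF {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            switch t := tok.(type) {
            case xml.StartElement:
                fmt.Println("start:", t.Name.Local)
            case xml.CharData:
                // Copy before the next Token call; t aliases the
                // decoder's internal buffer.
                kept := xml.CopyToken(t).(xml.CharData)
                fmt.Println("text:", string(kept))
            case xml.EndElement:
                fmt.Println("end:", t.Name.Local)
            }
        }
    }
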
-func (d *Decoder) RawToken() (Token, error) { - if d.unmarshalDepth > 0 { - return nil, errRawToken - } - return d.rawToken() -} - -func (d *Decoder) rawToken() (Token, error) { - if d.err != nil { - return nil, d.err - } - if d.needClose { - // The last element we read was self-closing and - // we returned just the StartElement half. - // Return the EndElement half now. - d.needClose = false - return EndElement{d.toClose}, nil - } - - b, ok := d.getc() - if !ok { - return nil, d.err - } - - if b != '<' { - // Text section. - d.ungetc(b) - data := d.text(-1, false) - if data == nil { - return nil, d.err - } - return CharData(data), nil - } - - if b, ok = d.mustgetc(); !ok { - return nil, d.err - } - switch b { - case '/': - // ' { - d.err = d.syntaxError("invalid characters between ") - return nil, d.err - } - return EndElement{name}, nil - - case '?': - // ' { - break - } - b0 = b - } - data := d.buf.Bytes() - data = data[0 : len(data)-2] // chop ?> - - if target == "xml" { - content := string(data) - ver := procInst("version", content) - if ver != "" && ver != "1.0" { - d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) - return nil, d.err - } - enc := procInst("encoding", content) - if enc != "" && enc != "utf-8" && enc != "UTF-8" { - if d.CharsetReader == nil { - d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) - return nil, d.err - } - newr, err := d.CharsetReader(enc, d.r.(io.Reader)) - if err != nil { - d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) - return nil, d.err - } - if newr == nil { - panic("CharsetReader returned a nil Reader for charset " + enc) - } - d.switchToReader(newr) - } - } - return ProcInst{target, data}, nil - - case '!': - // ' { - break - } - b0, b1 = b1, b - } - data := d.buf.Bytes() - data = data[0 : len(data)-3] // chop --> - return Comment(data), nil - - case '[': // . - data := d.text(-1, true) - if data == nil { - return nil, d.err - } - return CharData(data), nil - } - - // Probably a directive: , , etc. - // We don't care, but accumulate for caller. Quoted angle - // brackets do not count for nesting. 
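// For example (an illustration, mirroring the directive tests later in this
// file): in
//
//	<!DOCTYPE doc [<!ENTITY e "x>y">]>
//
// the '>' inside the quoted entity value does not close anything; the text
// between "<!" and the final unquoted '>' is returned as a single Directive
// token.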
- d.buf.Reset() - d.buf.WriteByte(b) - inquote := uint8(0) - depth := 0 - for { - if b, ok = d.mustgetc(); !ok { - return nil, d.err - } - if inquote == 0 && b == '>' && depth == 0 { - break - } - HandleB: - d.buf.WriteByte(b) - switch { - case b == inquote: - inquote = 0 - - case inquote != 0: - // in quotes, no special action - - case b == '\'' || b == '"': - inquote = b - - case b == '>' && inquote == 0: - depth-- - - case b == '<' && inquote == 0: - // Look for ` - -var testEntity = map[string]string{"何": "What", "is-it": "is it?"} - -var rawTokens = []Token{ - CharData("\n"), - ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, - CharData("\n"), - Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" - "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), - CharData("\n"), - StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, - CharData("\n "), - StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, - CharData("World <>'\" 白鵬翔"), - EndElement{Name{"", "hello"}}, - CharData("\n "), - StartElement{Name{"", "query"}, []Attr{}}, - CharData("What is it?"), - EndElement{Name{"", "query"}}, - CharData("\n "), - StartElement{Name{"", "goodbye"}, []Attr{}}, - EndElement{Name{"", "goodbye"}}, - CharData("\n "), - StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, - CharData("\n "), - StartElement{Name{"", "inner"}, []Attr{}}, - EndElement{Name{"", "inner"}}, - CharData("\n "), - EndElement{Name{"", "outer"}}, - CharData("\n "), - StartElement{Name{"tag", "name"}, []Attr{}}, - CharData("\n "), - CharData("Some text here."), - CharData("\n "), - EndElement{Name{"tag", "name"}}, - CharData("\n"), - EndElement{Name{"", "body"}}, - Comment(" missing final newline "), -} - -var cookedTokens = []Token{ - CharData("\n"), - ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, - CharData("\n"), - Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" - "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), - CharData("\n"), - StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, - CharData("\n "), - StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, - CharData("World <>'\" 白鵬翔"), - EndElement{Name{"ns2", "hello"}}, - CharData("\n "), - StartElement{Name{"ns2", "query"}, []Attr{}}, - CharData("What is it?"), - EndElement{Name{"ns2", "query"}}, - CharData("\n "), - StartElement{Name{"ns2", "goodbye"}, []Attr{}}, - EndElement{Name{"ns2", "goodbye"}}, - CharData("\n "), - StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, - CharData("\n "), - StartElement{Name{"ns2", "inner"}, []Attr{}}, - EndElement{Name{"ns2", "inner"}}, - CharData("\n "), - EndElement{Name{"ns2", "outer"}}, - CharData("\n "), - StartElement{Name{"ns3", "name"}, []Attr{}}, - CharData("\n "), - CharData("Some text here."), - CharData("\n "), - EndElement{Name{"ns3", "name"}}, - CharData("\n"), - EndElement{Name{"ns2", "body"}}, - Comment(" missing final newline "), -} - -const testInputAltEncoding = ` - -VALUE` - -var rawTokensAltEncoding = []Token{ - CharData("\n"), - ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("value"), - EndElement{Name{"", "tag"}}, -} - -var xmlInput = []string{ - // 
unexpected EOF cases - "<", - "", - "", - "", - // "", // let the Token() caller handle - "", - "", - "", - "", - " c;", - "", - "", - "", - // "", // let the Token() caller handle - "", - "", - "cdata]]>", -} - -func TestRawToken(t *testing.T) { - d := NewDecoder(strings.NewReader(testInput)) - d.Entity = testEntity - testRawToken(t, d, testInput, rawTokens) -} - -const nonStrictInput = ` -non&entity -&unknown;entity -{ -&#zzz; -&ãªã¾ãˆ3; -<-gt; -&; -&0a; -` - -var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} - -var nonStrictTokens = []Token{ - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("non&entity"), - EndElement{Name{"", "tag"}}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("&unknown;entity"), - EndElement{Name{"", "tag"}}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("{"), - EndElement{Name{"", "tag"}}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("&#zzz;"), - EndElement{Name{"", "tag"}}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("&ãªã¾ãˆ3;"), - EndElement{Name{"", "tag"}}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("<-gt;"), - EndElement{Name{"", "tag"}}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("&;"), - EndElement{Name{"", "tag"}}, - CharData("\n"), - StartElement{Name{"", "tag"}, []Attr{}}, - CharData("&0a;"), - EndElement{Name{"", "tag"}}, - CharData("\n"), -} - -func TestNonStrictRawToken(t *testing.T) { - d := NewDecoder(strings.NewReader(nonStrictInput)) - d.Strict = false - testRawToken(t, d, nonStrictInput, nonStrictTokens) -} - -type downCaser struct { - t *testing.T - r io.ByteReader -} - -func (d *downCaser) ReadByte() (c byte, err error) { - c, err = d.r.ReadByte() - if c >= 'A' && c <= 'Z' { - c += 'a' - 'A' - } - return -} - -func (d *downCaser) Read(p []byte) (int, error) { - d.t.Fatalf("unexpected Read call on downCaser reader") - panic("unreachable") -} - -func TestRawTokenAltEncoding(t *testing.T) { - d := NewDecoder(strings.NewReader(testInputAltEncoding)) - d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { - if charset != "x-testing-uppercase" { - t.Fatalf("unexpected charset %q", charset) - } - return &downCaser{t, input.(io.ByteReader)}, nil - } - testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) -} - -func TestRawTokenAltEncodingNoConverter(t *testing.T) { - d := NewDecoder(strings.NewReader(testInputAltEncoding)) - token, err := d.RawToken() - if token == nil { - t.Fatalf("expected a token on first RawToken call") - } - if err != nil { - t.Fatal(err) - } - token, err = d.RawToken() - if token != nil { - t.Errorf("expected a nil token; got %#v", token) - } - if err == nil { - t.Fatalf("expected an error on second RawToken call") - } - const encoding = "x-testing-uppercase" - if !strings.Contains(err.Error(), encoding) { - t.Errorf("expected error to contain %q; got error: %v", - encoding, err) - } -} - -func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { - lastEnd := int64(0) - for i, want := range rawTokens { - start := d.InputOffset() - have, err := d.RawToken() - end := d.InputOffset() - if err != nil { - t.Fatalf("token %d: unexpected error: %s", i, err) - } - if !reflect.DeepEqual(have, want) { - var shave, swant string - if _, ok := have.(CharData); ok { - shave = fmt.Sprintf("CharData(%q)", have) - } else { - shave = fmt.Sprintf("%#v", have) - } - if _, ok := want.(CharData); ok 
{ - swant = fmt.Sprintf("CharData(%q)", want) - } else { - swant = fmt.Sprintf("%#v", want) - } - t.Errorf("token %d = %s, want %s", i, shave, swant) - } - - // Check that InputOffset returned actual token. - switch { - case start < lastEnd: - t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) - case start >= end: - // Special case: EndElement can be synthesized. - if start == end && end == lastEnd { - break - } - t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) - case end > int64(len(raw)): - t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) - default: - text := raw[start:end] - if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { - t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) - } - } - lastEnd = end - } -} - -// Ensure that directives (specifically !DOCTYPE) include the complete -// text of any nested directives, noting that < and > do not change -// nesting depth if they are in single or double quotes. - -var nestedDirectivesInput = ` -]> -">]> -]> -'>]> -]> -'>]> -]> -` - -var nestedDirectivesTokens = []Token{ - CharData("\n"), - Directive(`DOCTYPE []`), - CharData("\n"), - Directive(`DOCTYPE [">]`), - CharData("\n"), - Directive(`DOCTYPE []`), - CharData("\n"), - Directive(`DOCTYPE ['>]`), - CharData("\n"), - Directive(`DOCTYPE []`), - CharData("\n"), - Directive(`DOCTYPE ['>]`), - CharData("\n"), - Directive(`DOCTYPE []`), - CharData("\n"), -} - -func TestNestedDirectives(t *testing.T) { - d := NewDecoder(strings.NewReader(nestedDirectivesInput)) - - for i, want := range nestedDirectivesTokens { - have, err := d.Token() - if err != nil { - t.Fatalf("token %d: unexpected error: %s", i, err) - } - if !reflect.DeepEqual(have, want) { - t.Errorf("token %d = %#v want %#v", i, have, want) - } - } -} - -func TestToken(t *testing.T) { - d := NewDecoder(strings.NewReader(testInput)) - d.Entity = testEntity - - for i, want := range cookedTokens { - have, err := d.Token() - if err != nil { - t.Fatalf("token %d: unexpected error: %s", i, err) - } - if !reflect.DeepEqual(have, want) { - t.Errorf("token %d = %#v want %#v", i, have, want) - } - } -} - -func TestSyntax(t *testing.T) { - for i := range xmlInput { - d := NewDecoder(strings.NewReader(xmlInput[i])) - var err error - for _, err = d.Token(); err == nil; _, err = d.Token() { - } - if _, ok := err.(*SyntaxError); !ok { - t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) - } - } -} - -type allScalars struct { - True1 bool - True2 bool - False1 bool - False2 bool - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - Uint int - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - Uintptr uintptr - Float32 float32 - Float64 float64 - String string - PtrString *string -} - -var all = allScalars{ - True1: true, - True2: true, - False1: false, - False2: false, - Int: 1, - Int8: -2, - Int16: 3, - Int32: -4, - Int64: 5, - Uint: 6, - Uint8: 7, - Uint16: 8, - Uint32: 9, - Uint64: 10, - Uintptr: 11, - Float32: 13.0, - Float64: 14.0, - String: "15", - PtrString: &sixteen, -} - -var sixteen = "16" - -const testScalarsInput = ` - true - 1 - false - 0 - 1 - -2 - 3 - -4 - 5 - 6 - 7 - 8 - 9 - 10 - 11 - 12.0 - 13.0 - 14.0 - 15 - 16 -` - -func TestAllScalars(t *testing.T) { - var a allScalars - err := Unmarshal([]byte(testScalarsInput), &a) - - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, all) { - t.Errorf("have %+v want 
%+v", a, all) - } -} - -type item struct { - Field_a string -} - -func TestIssue569(t *testing.T) { - data := `abcd` - var i item - err := Unmarshal([]byte(data), &i) - - if err != nil || i.Field_a != "abcd" { - t.Fatal("Expecting abcd") - } -} - -func TestUnquotedAttrs(t *testing.T) { - data := "" - d := NewDecoder(strings.NewReader(data)) - d.Strict = false - token, err := d.Token() - if _, ok := err.(*SyntaxError); ok { - t.Errorf("Unexpected error: %v", err) - } - if token.(StartElement).Name.Local != "tag" { - t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) - } - attr := token.(StartElement).Attr[0] - if attr.Value != "azAZ09:-_" { - t.Errorf("Unexpected attribute value: %v", attr.Value) - } - if attr.Name.Local != "attr" { - t.Errorf("Unexpected attribute name: %v", attr.Name.Local) - } -} - -func TestValuelessAttrs(t *testing.T) { - tests := [][3]string{ - {"
<p nowrap>", "p", "nowrap"}, - {"<p nowrap >
    ", "p", "nowrap"}, - {"", "input", "checked"}, - {"", "input", "checked"}, - } - for _, test := range tests { - d := NewDecoder(strings.NewReader(test[0])) - d.Strict = false - token, err := d.Token() - if _, ok := err.(*SyntaxError); ok { - t.Errorf("Unexpected error: %v", err) - } - if token.(StartElement).Name.Local != test[1] { - t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) - } - attr := token.(StartElement).Attr[0] - if attr.Value != test[2] { - t.Errorf("Unexpected attribute value: %v", attr.Value) - } - if attr.Name.Local != test[2] { - t.Errorf("Unexpected attribute name: %v", attr.Name.Local) - } - } -} - -func TestCopyTokenCharData(t *testing.T) { - data := []byte("same data") - var tok1 Token = CharData(data) - tok2 := CopyToken(tok1) - if !reflect.DeepEqual(tok1, tok2) { - t.Error("CopyToken(CharData) != CharData") - } - data[1] = 'o' - if reflect.DeepEqual(tok1, tok2) { - t.Error("CopyToken(CharData) uses same buffer.") - } -} - -func TestCopyTokenStartElement(t *testing.T) { - elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} - var tok1 Token = elt - tok2 := CopyToken(tok1) - if tok1.(StartElement).Attr[0].Value != "en" { - t.Error("CopyToken overwrote Attr[0]") - } - if !reflect.DeepEqual(tok1, tok2) { - t.Error("CopyToken(StartElement) != StartElement") - } - tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} - if reflect.DeepEqual(tok1, tok2) { - t.Error("CopyToken(CharData) uses same buffer.") - } -} - -func TestSyntaxErrorLineNum(t *testing.T) { - testInput := "

<P>Foo<P>\n\n<P>
    Bar\n" - d := NewDecoder(strings.NewReader(testInput)) - var err error - for _, err = d.Token(); err == nil; _, err = d.Token() { - } - synerr, ok := err.(*SyntaxError) - if !ok { - t.Error("Expected SyntaxError.") - } - if synerr.Line != 3 { - t.Error("SyntaxError didn't have correct line number.") - } -} - -func TestTrailingRawToken(t *testing.T) { - input := ` ` - d := NewDecoder(strings.NewReader(input)) - var err error - for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { - } - if err != io.EOF { - t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) - } -} - -func TestTrailingToken(t *testing.T) { - input := ` ` - d := NewDecoder(strings.NewReader(input)) - var err error - for _, err = d.Token(); err == nil; _, err = d.Token() { - } - if err != io.EOF { - t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) - } -} - -func TestEntityInsideCDATA(t *testing.T) { - input := `` - d := NewDecoder(strings.NewReader(input)) - var err error - for _, err = d.Token(); err == nil; _, err = d.Token() { - } - if err != io.EOF { - t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) - } -} - -var characterTests = []struct { - in string - err string -}{ - {"\x12", "illegal character code U+0012"}, - {"\x0b", "illegal character code U+000B"}, - {"\xef\xbf\xbe", "illegal character code U+FFFE"}, - {"\r\n\x07", "illegal character code U+0007"}, - {"what's up", "expected attribute name in element"}, - {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, - {"&\x01;", "invalid character entity & (no semicolon)"}, - {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, - {"&hello;", "invalid character entity &hello;"}, -} - -func TestDisallowedCharacters(t *testing.T) { - - for i, tt := range characterTests { - d := NewDecoder(strings.NewReader(tt.in)) - var err error - - for err == nil { - _, err = d.Token() - } - synerr, ok := err.(*SyntaxError) - if !ok { - t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) - } - if synerr.Msg != tt.err { - t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) - } - } -} - -type procInstEncodingTest struct { - expect, got string -} - -var procInstTests = []struct { - input string - expect [2]string -}{ - {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, - {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, - {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, - {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, - {`encoding="FOO" `, [2]string{"", "FOO"}}, -} - -func TestProcInstEncoding(t *testing.T) { - for _, test := range procInstTests { - if got := procInst("version", test.input); got != test.expect[0] { - t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) - } - if got := procInst("encoding", test.input); got != test.expect[1] { - t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) - } - } -} - -// Ensure that directives with comments include the complete -// text of any nested directives. 
- -var directivesWithCommentsInput = ` -]> -]> - --> --> []> -` - -var directivesWithCommentsTokens = []Token{ - CharData("\n"), - Directive(`DOCTYPE []`), - CharData("\n"), - Directive(`DOCTYPE []`), - CharData("\n"), - Directive(`DOCTYPE []`), - CharData("\n"), -} - -func TestDirectivesWithComments(t *testing.T) { - d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) - - for i, want := range directivesWithCommentsTokens { - have, err := d.Token() - if err != nil { - t.Fatalf("token %d: unexpected error: %s", i, err) - } - if !reflect.DeepEqual(have, want) { - t.Errorf("token %d = %#v want %#v", i, have, want) - } - } -} - -// Writer whose Write method always returns an error. -type errWriter struct{} - -func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } - -func TestEscapeTextIOErrors(t *testing.T) { - expectErr := "unwritable" - err := EscapeText(errWriter{}, []byte{'A'}) - - if err == nil || err.Error() != expectErr { - t.Errorf("have %v, want %v", err, expectErr) - } -} - -func TestEscapeTextInvalidChar(t *testing.T) { - input := []byte("A \x00 terminated string.") - expected := "A \uFFFD terminated string." - - buff := new(bytes.Buffer) - if err := EscapeText(buff, input); err != nil { - t.Fatalf("have %v, want nil", err) - } - text := buff.String() - - if text != expected { - t.Errorf("have %v, want %v", text, expected) - } -} - -func TestIssue5880(t *testing.T) { - type T []byte - data, err := Marshal(T{192, 168, 0, 1}) - if err != nil { - t.Errorf("Marshal error: %v", err) - } - if !utf8.Valid(data) { - t.Errorf("Marshal generated invalid UTF-8: %x", data) - } -} diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go deleted file mode 100644 index 514db5dd..00000000 --- a/vendor/golang.org/x/net/webdav/litmus_test_server.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program is a server for the WebDAV 'litmus' compliance test at -http://www.webdav.org/neon/litmus/ -To run the test: - -go run litmus_test_server.go - -and separately, from the downloaded litmus-xxx directory: - -make URL=http://localhost:9999/ check -*/ -package main - -import ( - "flag" - "fmt" - "log" - "net/http" - "net/url" - - "golang.org/x/net/webdav" -) - -var port = flag.Int("port", 9999, "server port") - -func main() { - flag.Parse() - log.SetFlags(0) - h := &webdav.Handler{ - FileSystem: webdav.NewMemFS(), - LockSystem: webdav.NewMemLS(), - Logger: func(r *http.Request, err error) { - litmus := r.Header.Get("X-Litmus") - if len(litmus) > 19 { - litmus = litmus[:16] + "..." - } - - switch r.Method { - case "COPY", "MOVE": - dst := "" - if u, err := url.Parse(r.Header.Get("Destination")); err == nil { - dst = u.Path - } - o := r.Header.Get("Overwrite") - log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) - default: - log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) - } - }, - } - - // The next line would normally be: - // http.Handle("/", h) - // but we wrap that HTTP handler h to cater for a special case. - // - // The propfind_invalid2 litmus test case expects an empty namespace prefix - // declaration to be an error. The FAQ in the webdav litmus test says: - // - // "What does the "propfind_invalid2" test check for?... 
- // - // If a request was sent with an XML body which included an empty namespace - // prefix declaration (xmlns:ns1=""), then the server must reject that with - // a "400 Bad Request" response, as it is invalid according to the XML - // Namespace specification." - // - // On the other hand, the Go standard library's encoding/xml package - // accepts an empty xmlns namespace, as per the discussion at - // https://github.com/golang/go/issues/8068 - // - // Empty namespaces seem disallowed in the second (2006) edition of the XML - // standard, but allowed in a later edition. The grammar differs between - // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and - // http://www.w3.org/TR/REC-xml-names/#dt-prefix - // - // Thus, we assume that the propfind_invalid2 test is obsolete, and - // hard-code the 400 Bad Request response that the test expects. - http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { - http.Error(w, "400 Bad Request", http.StatusBadRequest) - return - } - h.ServeHTTP(w, r) - })) - - addr := fmt.Sprintf(":%d", *port) - log.Printf("Serving %v", addr) - log.Fatal(http.ListenAndServe(addr, nil)) -} diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go deleted file mode 100644 index 344ac5ce..00000000 --- a/vendor/golang.org/x/net/webdav/lock.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webdav - -import ( - "container/heap" - "errors" - "strconv" - "strings" - "sync" - "time" -) - -var ( - // ErrConfirmationFailed is returned by a LockSystem's Confirm method. - ErrConfirmationFailed = errors.New("webdav: confirmation failed") - // ErrForbidden is returned by a LockSystem's Unlock method. - ErrForbidden = errors.New("webdav: forbidden") - // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. - ErrLocked = errors.New("webdav: locked") - // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. - ErrNoSuchLock = errors.New("webdav: no such lock") -) - -// Condition can match a WebDAV resource, based on a token or ETag. -// Exactly one of Token and ETag should be non-empty. -type Condition struct { - Not bool - Token string - ETag string -} - -// LockSystem manages access to a collection of named resources. The elements -// in a lock name are separated by slash ('/', U+002F) characters, regardless -// of host operating system convention. -type LockSystem interface { - // Confirm confirms that the caller can claim all of the locks specified by - // the given conditions, and that holding the union of all of those locks - // gives exclusive access to all of the named resources. Up to two resources - // can be named. Empty names are ignored. - // - // Exactly one of release and err will be non-nil. If release is non-nil, - // all of the requested locks are held until release is called. Calling - // release does not unlock the lock, in the WebDAV UNLOCK sense, but once - // Confirm has confirmed that a lock claim is valid, that lock cannot be - // Confirmed again until it has been released. - // - // If Confirm returns ErrConfirmationFailed then the Handler will continue - // to try any other set of locks presented (a WebDAV HTTP request can - // present more than one set of locks). 
If it returns any other non-nil - // error, the Handler will write a "500 Internal Server Error" HTTP status. - Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) - - // Create creates a lock with the given depth, duration, owner and root - // (name). The depth will either be negative (meaning infinite) or zero. - // - // If Create returns ErrLocked then the Handler will write a "423 Locked" - // HTTP status. If it returns any other non-nil error, the Handler will - // write a "500 Internal Server Error" HTTP status. - // - // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for - // when to use each error. - // - // The token returned identifies the created lock. It should be an absolute - // URI as defined by RFC 3986, Section 4.3. In particular, it should not - // contain whitespace. - Create(now time.Time, details LockDetails) (token string, err error) - - // Refresh refreshes the lock with the given token. - // - // If Refresh returns ErrLocked then the Handler will write a "423 Locked" - // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write - // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil - // error, the Handler will write a "500 Internal Server Error" HTTP status. - // - // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for - // when to use each error. - Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) - - // Unlock unlocks the lock with the given token. - // - // If Unlock returns ErrForbidden then the Handler will write a "403 - // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler - // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock - // then the Handler will write a "409 Conflict" HTTP Status. If it returns - // any other non-nil error, the Handler will write a "500 Internal Server - // Error" HTTP status. - // - // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for - // when to use each error. - Unlock(now time.Time, token string) error -} - -// LockDetails are a lock's metadata. -type LockDetails struct { - // Root is the root resource name being locked. For a zero-depth lock, the - // root is the only resource being locked. - Root string - // Duration is the lock timeout. A negative duration means infinite. - Duration time.Duration - // OwnerXML is the verbatim XML given in a LOCK HTTP request. - // - // TODO: does the "verbatim" nature play well with XML namespaces? - // Does the OwnerXML field need to have more structure? See - // https://codereview.appspot.com/175140043/#msg2 - OwnerXML string - // ZeroDepth is whether the lock has zero depth. If it does not have zero - // depth, it has infinite depth. - ZeroDepth bool -} - -// NewMemLS returns a new in-memory LockSystem. -func NewMemLS() LockSystem { - return &memLS{ - byName: make(map[string]*memLSNode), - byToken: make(map[string]*memLSNode), - gen: uint64(time.Now().Unix()), - } -} - -type memLS struct { - mu sync.Mutex - byName map[string]*memLSNode - byToken map[string]*memLSNode - gen uint64 - // byExpiry only contains those nodes whose LockDetails have a finite - // Duration and are yet to expire. 
- byExpiry byExpiry -} - -func (m *memLS) nextToken() string { - m.gen++ - return strconv.FormatUint(m.gen, 10) -} - -func (m *memLS) collectExpiredNodes(now time.Time) { - for len(m.byExpiry) > 0 { - if now.Before(m.byExpiry[0].expiry) { - break - } - m.remove(m.byExpiry[0]) - } -} - -func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { - m.mu.Lock() - defer m.mu.Unlock() - m.collectExpiredNodes(now) - - var n0, n1 *memLSNode - if name0 != "" { - if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { - return nil, ErrConfirmationFailed - } - } - if name1 != "" { - if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { - return nil, ErrConfirmationFailed - } - } - - // Don't hold the same node twice. - if n1 == n0 { - n1 = nil - } - - if n0 != nil { - m.hold(n0) - } - if n1 != nil { - m.hold(n1) - } - return func() { - m.mu.Lock() - defer m.mu.Unlock() - if n1 != nil { - m.unhold(n1) - } - if n0 != nil { - m.unhold(n0) - } - }, nil -} - -// lookup returns the node n that locks the named resource, provided that n -// matches at least one of the given conditions and that lock isn't held by -// another party. Otherwise, it returns nil. -// -// n may be a parent of the named resource, if n is an infinite depth lock. -func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { - // TODO: support Condition.Not and Condition.ETag. - for _, c := range conditions { - n = m.byToken[c.Token] - if n == nil || n.held { - continue - } - if name == n.details.Root { - return n - } - if n.details.ZeroDepth { - continue - } - if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { - return n - } - } - return nil -} - -func (m *memLS) hold(n *memLSNode) { - if n.held { - panic("webdav: memLS inconsistent held state") - } - n.held = true - if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { - heap.Remove(&m.byExpiry, n.byExpiryIndex) - } -} - -func (m *memLS) unhold(n *memLSNode) { - if !n.held { - panic("webdav: memLS inconsistent held state") - } - n.held = false - if n.details.Duration >= 0 { - heap.Push(&m.byExpiry, n) - } -} - -func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { - m.mu.Lock() - defer m.mu.Unlock() - m.collectExpiredNodes(now) - details.Root = slashClean(details.Root) - - if !m.canCreate(details.Root, details.ZeroDepth) { - return "", ErrLocked - } - n := m.create(details.Root) - n.token = m.nextToken() - m.byToken[n.token] = n - n.details = details - if n.details.Duration >= 0 { - n.expiry = now.Add(n.details.Duration) - heap.Push(&m.byExpiry, n) - } - return n.token, nil -} - -func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { - m.mu.Lock() - defer m.mu.Unlock() - m.collectExpiredNodes(now) - - n := m.byToken[token] - if n == nil { - return LockDetails{}, ErrNoSuchLock - } - if n.held { - return LockDetails{}, ErrLocked - } - if n.byExpiryIndex >= 0 { - heap.Remove(&m.byExpiry, n.byExpiryIndex) - } - n.details.Duration = duration - if n.details.Duration >= 0 { - n.expiry = now.Add(n.details.Duration) - heap.Push(&m.byExpiry, n) - } - return n.details, nil -} - -func (m *memLS) Unlock(now time.Time, token string) error { - m.mu.Lock() - defer m.mu.Unlock() - m.collectExpiredNodes(now) - - n := m.byToken[token] - if n == nil { - return ErrNoSuchLock - } - if n.held { - return ErrLocked - } - m.remove(n) - return nil -} - -func (m *memLS) canCreate(name string, zeroDepth bool) bool { - return walkToRoot(name, 
func(name0 string, first bool) bool { - n := m.byName[name0] - if n == nil { - return true - } - if first { - if n.token != "" { - // The target node is already locked. - return false - } - if !zeroDepth { - // The requested lock depth is infinite, and the fact that n exists - // (n != nil) means that a descendent of the target node is locked. - return false - } - } else if n.token != "" && !n.details.ZeroDepth { - // An ancestor of the target node is locked with infinite depth. - return false - } - return true - }) -} - -func (m *memLS) create(name string) (ret *memLSNode) { - walkToRoot(name, func(name0 string, first bool) bool { - n := m.byName[name0] - if n == nil { - n = &memLSNode{ - details: LockDetails{ - Root: name0, - }, - byExpiryIndex: -1, - } - m.byName[name0] = n - } - n.refCount++ - if first { - ret = n - } - return true - }) - return ret -} - -func (m *memLS) remove(n *memLSNode) { - delete(m.byToken, n.token) - n.token = "" - walkToRoot(n.details.Root, func(name0 string, first bool) bool { - x := m.byName[name0] - x.refCount-- - if x.refCount == 0 { - delete(m.byName, name0) - } - return true - }) - if n.byExpiryIndex >= 0 { - heap.Remove(&m.byExpiry, n.byExpiryIndex) - } -} - -func walkToRoot(name string, f func(name0 string, first bool) bool) bool { - for first := true; ; first = false { - if !f(name, first) { - return false - } - if name == "/" { - break - } - name = name[:strings.LastIndex(name, "/")] - if name == "" { - name = "/" - } - } - return true -} - -type memLSNode struct { - // details are the lock metadata. Even if this node's name is not explicitly locked, - // details.Root will still equal the node's name. - details LockDetails - // token is the unique identifier for this node's lock. An empty token means that - // this node is not explicitly locked. - token string - // refCount is the number of self-or-descendent nodes that are explicitly locked. - refCount int - // expiry is when this node's lock expires. - expiry time.Time - // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 - // if this node does not expire, or has expired. - byExpiryIndex int - // held is whether this node's lock is actively held by a Confirm call. - held bool -} - -type byExpiry []*memLSNode - -func (b *byExpiry) Len() int { - return len(*b) -} - -func (b *byExpiry) Less(i, j int) bool { - return (*b)[i].expiry.Before((*b)[j].expiry) -} - -func (b *byExpiry) Swap(i, j int) { - (*b)[i], (*b)[j] = (*b)[j], (*b)[i] - (*b)[i].byExpiryIndex = i - (*b)[j].byExpiryIndex = j -} - -func (b *byExpiry) Push(x interface{}) { - n := x.(*memLSNode) - n.byExpiryIndex = len(*b) - *b = append(*b, n) -} - -func (b *byExpiry) Pop() interface{} { - i := len(*b) - 1 - n := (*b)[i] - (*b)[i] = nil - n.byExpiryIndex = -1 - *b = (*b)[:i] - return n -} - -const infiniteTimeout = -1 - -// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is -// empty, an infiniteTimeout is returned. 
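// For illustration (behavior matching the parse rules below and the
// TestParseTimeout cases in this package):
//
//	parseTimeout("")                            // infiniteTimeout, nil
//	parseTimeout("Infinite")                    // infiniteTimeout, nil
//	parseTimeout("Second-123")                  // 123 * time.Second, nil
//	parseTimeout("Infinite, Second-4100000000") // infiniteTimeout, nil (only the first list entry is used)
//	parseTimeout("junk")                        // 0, errInvalidTimeout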
-func parseTimeout(s string) (time.Duration, error) { - if s == "" { - return infiniteTimeout, nil - } - if i := strings.IndexByte(s, ','); i >= 0 { - s = s[:i] - } - s = strings.TrimSpace(s) - if s == "Infinite" { - return infiniteTimeout, nil - } - const pre = "Second-" - if !strings.HasPrefix(s, pre) { - return 0, errInvalidTimeout - } - s = s[len(pre):] - if s == "" || s[0] < '0' || '9' < s[0] { - return 0, errInvalidTimeout - } - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || 1<<32-1 < n { - return 0, errInvalidTimeout - } - return time.Duration(n) * time.Second, nil -} diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go deleted file mode 100644 index 5cf14cda..00000000 --- a/vendor/golang.org/x/net/webdav/lock_test.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webdav - -import ( - "fmt" - "math/rand" - "path" - "reflect" - "sort" - "strconv" - "strings" - "testing" - "time" -) - -func TestWalkToRoot(t *testing.T) { - testCases := []struct { - name string - want []string - }{{ - "/a/b/c/d", - []string{ - "/a/b/c/d", - "/a/b/c", - "/a/b", - "/a", - "/", - }, - }, { - "/a", - []string{ - "/a", - "/", - }, - }, { - "/", - []string{ - "/", - }, - }} - - for _, tc := range testCases { - var got []string - if !walkToRoot(tc.name, func(name0 string, first bool) bool { - if first != (len(got) == 0) { - t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) - return false - } - got = append(got, name0) - return true - }) { - continue - } - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) - } - } -} - -var lockTestDurations = []time.Duration{ - infiniteTimeout, // infiniteTimeout means to never expire. - 0, // A zero duration means to expire immediately. - 100 * time.Hour, // A very large duration will not expire in these tests. -} - -// lockTestNames are the names of a set of mutually compatible locks. For each -// name fragment: -// - _ means no explicit lock. -// - i means an infinite-depth lock, -// - z means a zero-depth lock, -var lockTestNames = []string{ - "/_/_/_/_/z", - "/_/_/i", - "/_/z", - "/_/z/i", - "/_/z/z", - "/_/z/_/i", - "/_/z/_/z", - "/i", - "/z", - "/z/_/i", - "/z/_/z", -} - -func lockTestZeroDepth(name string) bool { - switch name[len(name)-1] { - case 'i': - return false - case 'z': - return true - } - panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) -} - -func TestMemLSCanCreate(t *testing.T) { - now := time.Unix(0, 0) - m := NewMemLS().(*memLS) - - for _, name := range lockTestNames { - _, err := m.Create(now, LockDetails{ - Root: name, - Duration: infiniteTimeout, - ZeroDepth: lockTestZeroDepth(name), - }) - if err != nil { - t.Fatalf("creating lock for %q: %v", name, err) - } - } - - wantCanCreate := func(name string, zeroDepth bool) bool { - for _, n := range lockTestNames { - switch { - case n == name: - // An existing lock has the same name as the proposed lock. - return false - case strings.HasPrefix(n, name): - // An existing lock would be a child of the proposed lock, - // which conflicts if the proposed lock has infinite depth. - if !zeroDepth { - return false - } - case strings.HasPrefix(name, n): - // An existing lock would be an ancestor of the proposed lock, - // which conflicts if the ancestor has infinite depth. 
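// Concrete illustration: an existing infinite-depth lock on "/a" conflicts
// with any proposed lock on "/a/b", whereas an existing zero-depth lock on
// "/a" conflicts only with a proposal for "/a" itself or with an
// infinite-depth proposal on an ancestor such as "/".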
- if n[len(n)-1] == 'i' { - return false - } - } - } - return true - } - - var check func(int, string) - check = func(recursion int, name string) { - for _, zeroDepth := range []bool{false, true} { - got := m.canCreate(name, zeroDepth) - want := wantCanCreate(name, zeroDepth) - if got != want { - t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) - } - } - if recursion == 6 { - return - } - if name != "/" { - name += "/" - } - for _, c := range "_iz" { - check(recursion+1, name+string(c)) - } - } - check(0, "/") -} - -func TestMemLSLookup(t *testing.T) { - now := time.Unix(0, 0) - m := NewMemLS().(*memLS) - - badToken := m.nextToken() - t.Logf("badToken=%q", badToken) - - for _, name := range lockTestNames { - token, err := m.Create(now, LockDetails{ - Root: name, - Duration: infiniteTimeout, - ZeroDepth: lockTestZeroDepth(name), - }) - if err != nil { - t.Fatalf("creating lock for %q: %v", name, err) - } - t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) - } - - baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) - for _, baseName := range baseNames { - for _, suffix := range []string{"", "/0", "/1/2/3"} { - name := baseName + suffix - - goodToken := "" - base := m.byName[baseName] - if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { - goodToken = base.token - } - - for _, token := range []string{badToken, goodToken} { - if token == "" { - continue - } - - got := m.lookup(name, Condition{Token: token}) - want := base - if token == badToken { - want = nil - } - if got != want { - t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", - name, token, token == badToken, got, want) - } - } - } - } -} - -func TestMemLSConfirm(t *testing.T) { - now := time.Unix(0, 0) - m := NewMemLS().(*memLS) - alice, err := m.Create(now, LockDetails{ - Root: "/alice", - Duration: infiniteTimeout, - ZeroDepth: false, - }) - tweedle, err := m.Create(now, LockDetails{ - Root: "/tweedle", - Duration: infiniteTimeout, - ZeroDepth: false, - }) - if err != nil { - t.Fatalf("Create: %v", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Create: inconsistent state: %v", err) - } - - // Test a mismatch between name and condition. - _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) - if err != ErrConfirmationFailed { - t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) - } - - // Test two names (that fall under the same lock) in the one Confirm call. - release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) - if err != nil { - t.Fatalf("Confirm (twins): %v", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Confirm (twins): inconsistent state: %v", err) - } - release() - if err := m.consistent(); err != nil { - t.Fatalf("release (twins): inconsistent state: %v", err) - } - - // Test the same two names in overlapping Confirm / release calls. 
- releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) - if err != nil { - t.Fatalf("Confirm (sequence #0): %v", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) - } - - _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) - if err != ErrConfirmationFailed { - t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) - } - - releaseDee() - if err := m.consistent(); err != nil { - t.Fatalf("release (sequence #2): inconsistent state: %v", err) - } - - releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) - if err != nil { - t.Fatalf("Confirm (sequence #3): %v", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) - } - - // Test that you can't unlock a held lock. - err = m.Unlock(now, tweedle) - if err != ErrLocked { - t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) - } - - releaseDum() - if err := m.consistent(); err != nil { - t.Fatalf("release (sequence #5): inconsistent state: %v", err) - } - - err = m.Unlock(now, tweedle) - if err != nil { - t.Fatalf("Unlock (sequence #6): %v", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) - } -} - -func TestMemLSNonCanonicalRoot(t *testing.T) { - now := time.Unix(0, 0) - m := NewMemLS().(*memLS) - token, err := m.Create(now, LockDetails{ - Root: "/foo/./bar//", - Duration: 1 * time.Second, - }) - if err != nil { - t.Fatalf("Create: %v", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Create: inconsistent state: %v", err) - } - if err := m.Unlock(now, token); err != nil { - t.Fatalf("Unlock: %v", err) - } - if err := m.consistent(); err != nil { - t.Fatalf("Unlock: inconsistent state: %v", err) - } -} - -func TestMemLSExpiry(t *testing.T) { - m := NewMemLS().(*memLS) - testCases := []string{ - "setNow 0", - "create /a.5", - "want /a.5", - "create /c.6", - "want /a.5 /c.6", - "create /a/b.7", - "want /a.5 /a/b.7 /c.6", - "setNow 4", - "want /a.5 /a/b.7 /c.6", - "setNow 5", - "want /a/b.7 /c.6", - "setNow 6", - "want /a/b.7", - "setNow 7", - "want ", - "setNow 8", - "want ", - "create /a.12", - "create /b.13", - "create /c.15", - "create /a/d.16", - "want /a.12 /a/d.16 /b.13 /c.15", - "refresh /a.14", - "want /a.14 /a/d.16 /b.13 /c.15", - "setNow 12", - "want /a.14 /a/d.16 /b.13 /c.15", - "setNow 13", - "want /a.14 /a/d.16 /c.15", - "setNow 14", - "want /a/d.16 /c.15", - "refresh /a/d.20", - "refresh /c.20", - "want /a/d.20 /c.20", - "setNow 20", - "want ", - } - - tokens := map[string]string{} - zTime := time.Unix(0, 0) - now := zTime - for i, tc := range testCases { - j := strings.IndexByte(tc, ' ') - if j < 0 { - t.Fatalf("test case #%d %q: invalid command", i, tc) - } - op, arg := tc[:j], tc[j+1:] - switch op { - default: - t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) - - case "create", "refresh": - parts := strings.Split(arg, ".") - if len(parts) != 2 { - t.Fatalf("test case #%d %q: invalid create", i, tc) - } - root := parts[0] - d, err := strconv.Atoi(parts[1]) - if err != nil { - t.Fatalf("test case #%d %q: invalid duration", i, tc) - } - dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) - - switch op { - case "create": - token, err := m.Create(now, LockDetails{ - Root: root, - Duration: dur, - 
ZeroDepth: true, - }) - if err != nil { - t.Fatalf("test case #%d %q: Create: %v", i, tc, err) - } - tokens[root] = token - - case "refresh": - token := tokens[root] - if token == "" { - t.Fatalf("test case #%d %q: no token for %q", i, tc, root) - } - got, err := m.Refresh(now, token, dur) - if err != nil { - t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) - } - want := LockDetails{ - Root: root, - Duration: dur, - ZeroDepth: true, - } - if got != want { - t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) - } - } - - case "setNow": - d, err := strconv.Atoi(arg) - if err != nil { - t.Fatalf("test case #%d %q: invalid duration", i, tc) - } - now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) - - case "want": - m.mu.Lock() - m.collectExpiredNodes(now) - got := make([]string, 0, len(m.byToken)) - for _, n := range m.byToken { - got = append(got, fmt.Sprintf("%s.%d", - n.details.Root, n.expiry.Sub(zTime)/time.Second)) - } - m.mu.Unlock() - sort.Strings(got) - want := []string{} - if arg != "" { - want = strings.Split(arg, " ") - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) - } - } - - if err := m.consistent(); err != nil { - t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) - } - } -} - -func TestMemLS(t *testing.T) { - now := time.Unix(0, 0) - m := NewMemLS().(*memLS) - rng := rand.New(rand.NewSource(0)) - tokens := map[string]string{} - nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 - const N = 2000 - - for i := 0; i < N; i++ { - name := lockTestNames[rng.Intn(len(lockTestNames))] - duration := lockTestDurations[rng.Intn(len(lockTestDurations))] - confirmed, unlocked := false, false - - // If the name was already locked, we randomly confirm/release, refresh - // or unlock it. Otherwise, we create a lock. - token := tokens[name] - if token != "" { - switch rng.Intn(3) { - case 0: - confirmed = true - nConfirm++ - release, err := m.Confirm(now, name, "", Condition{Token: token}) - if err != nil { - t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) - } - if err := m.consistent(); err != nil { - t.Fatalf("iteration #%d: inconsistent state: %v", i, err) - } - release() - - case 1: - nRefresh++ - if _, err := m.Refresh(now, token, duration); err != nil { - t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) - } - - case 2: - unlocked = true - nUnlock++ - if err := m.Unlock(now, token); err != nil { - t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) - } - } - - } else { - nCreate++ - var err error - token, err = m.Create(now, LockDetails{ - Root: name, - Duration: duration, - ZeroDepth: lockTestZeroDepth(name), - }) - if err != nil { - t.Fatalf("iteration #%d: Create %q: %v", i, name, err) - } - } - - if !confirmed { - if duration == 0 || unlocked { - // A zero-duration lock should expire immediately and is - // effectively equivalent to being unlocked. 
- tokens[name] = "" - } else { - tokens[name] = token - } - } - - if err := m.consistent(); err != nil { - t.Fatalf("iteration #%d: inconsistent state: %v", i, err) - } - } - - if nConfirm < N/10 { - t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) - } - if nCreate < N/10 { - t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) - } - if nRefresh < N/10 { - t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) - } - if nUnlock < N/10 { - t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) - } -} - -func (m *memLS) consistent() error { - m.mu.Lock() - defer m.mu.Unlock() - - // If m.byName is non-empty, then it must contain an entry for the root "/", - // and its refCount should equal the number of locked nodes. - if len(m.byName) > 0 { - n := m.byName["/"] - if n == nil { - return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) - } - if n.refCount != len(m.byToken) { - return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) - } - } - - for name, n := range m.byName { - // The map keys should be consistent with the node's copy of the key. - if n.details.Root != name { - return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) - } - - // A name must be clean, and start with a "/". - if len(name) == 0 || name[0] != '/' { - return fmt.Errorf(`node name %q does not start with "/"`, name) - } - if name != path.Clean(name) { - return fmt.Errorf(`node name %q is not clean`, name) - } - - // A node's refCount should be positive. - if n.refCount <= 0 { - return fmt.Errorf("non-positive refCount for node at name %q", name) - } - - // A node's refCount should be the number of self-or-descendents that - // are locked (i.e. have a non-empty token). - var list []string - for name0, n0 := range m.byName { - // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', - // so strings.HasPrefix is equivalent to self-or-descendent name match. - // We don't have to worry about "/foo/bar" being a false positive match - // for "/foo/b". - if strings.HasPrefix(name0, name) && n0.token != "" { - list = append(list, name0) - } - } - if n.refCount != len(list) { - sort.Strings(list) - return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", - name, n.refCount, list, len(list)) - } - - // A node n is in m.byToken if it has a non-empty token. - if n.token != "" { - if _, ok := m.byToken[n.token]; !ok { - return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) - } - } - - // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. - if n.byExpiryIndex >= 0 { - if n.byExpiryIndex >= len(m.byExpiry) { - return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) - } - if n != m.byExpiry[n.byExpiryIndex] { - return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) - } - } - } - - for token, n := range m.byToken { - // The map keys should be consistent with the node's copy of the key. - if n.token != token { - return fmt.Errorf("node token %q != byToken map key %q", n.token, token) - } - - // Every node in m.byToken is in m.byName. 
- if _, ok := m.byName[n.details.Root]; !ok { - return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) - } - } - - for i, n := range m.byExpiry { - // The slice indices should be consistent with the node's copy of the index. - if n.byExpiryIndex != i { - return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) - } - - // Every node in m.byExpiry is in m.byName. - if _, ok := m.byName[n.details.Root]; !ok { - return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) - } - - // No node in m.byExpiry should be held. - if n.held { - return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) - } - } - return nil -} - -func TestParseTimeout(t *testing.T) { - testCases := []struct { - s string - want time.Duration - wantErr error - }{{ - "", - infiniteTimeout, - nil, - }, { - "Infinite", - infiniteTimeout, - nil, - }, { - "Infinitesimal", - 0, - errInvalidTimeout, - }, { - "infinite", - 0, - errInvalidTimeout, - }, { - "Second-0", - 0 * time.Second, - nil, - }, { - "Second-123", - 123 * time.Second, - nil, - }, { - " Second-456 ", - 456 * time.Second, - nil, - }, { - "Second-4100000000", - 4100000000 * time.Second, - nil, - }, { - "junk", - 0, - errInvalidTimeout, - }, { - "Second-", - 0, - errInvalidTimeout, - }, { - "Second--1", - 0, - errInvalidTimeout, - }, { - "Second--123", - 0, - errInvalidTimeout, - }, { - "Second-+123", - 0, - errInvalidTimeout, - }, { - "Second-0x123", - 0, - errInvalidTimeout, - }, { - "second-123", - 0, - errInvalidTimeout, - }, { - "Second-4294967295", - 4294967295 * time.Second, - nil, - }, { - // Section 10.7 says that "The timeout value for TimeType "Second" - // must not be greater than 2^32-1." - "Second-4294967296", - 0, - errInvalidTimeout, - }, { - // This test case comes from section 9.10.9 of the spec. It says, - // - // "In this request, the client has specified that it desires an - // infinite-length lock, if available, otherwise a timeout of 4.1 - // billion seconds, if available." - // - // The Go WebDAV package always supports infinite length locks, - // and ignores the fallback after the comma. - "Infinite, Second-4100000000", - infiniteTimeout, - nil, - }} - - for _, tc := range testCases { - got, gotErr := parseTimeout(tc.s) - if got != tc.want || gotErr != tc.wantErr { - t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) - } - } -} diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go deleted file mode 100644 index e36a3b31..00000000 --- a/vendor/golang.org/x/net/webdav/prop.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webdav - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "mime" - "net/http" - "os" - "path/filepath" - "strconv" - - "golang.org/x/net/context" -) - -// Proppatch describes a property update instruction as defined in RFC 4918. -// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH -type Proppatch struct { - // Remove specifies whether this patch removes properties. If it does not - // remove them, it sets them. - Remove bool - // Props contains the properties to be set or removed. - Props []Property -} - -// Propstat describes a XML propstat element as defined in RFC 4918. 
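// For illustration (namespace prefixes vary by client), a propstat in a
// multistatus response has roughly this shape:
//
//	<D:propstat>
//		<D:prop><D:getcontentlength>4096</D:getcontentlength></D:prop>
//		<D:status>HTTP/1.1 200 OK</D:status>
//	</D:propstat>
//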
-// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat -type Propstat struct { - // Props contains the properties for which Status applies. - Props []Property - - // Status defines the HTTP status code of the properties in Prop. - // Allowed values include, but are not limited to the WebDAV status - // code extensions for HTTP/1.1. - // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 - Status int - - // XMLError contains the XML representation of the optional error element. - // XML content within this field must not rely on any predefined - // namespace declarations or prefixes. If empty, the XML error element - // is omitted. - XMLError string - - // ResponseDescription contains the contents of the optional - // responsedescription field. If empty, the XML element is omitted. - ResponseDescription string -} - -// makePropstats returns a slice containing those of x and y whose Props slice -// is non-empty. If both are empty, it returns a slice containing an otherwise -// zero Propstat whose HTTP status code is 200 OK. -func makePropstats(x, y Propstat) []Propstat { - pstats := make([]Propstat, 0, 2) - if len(x.Props) != 0 { - pstats = append(pstats, x) - } - if len(y.Props) != 0 { - pstats = append(pstats, y) - } - if len(pstats) == 0 { - pstats = append(pstats, Propstat{ - Status: http.StatusOK, - }) - } - return pstats -} - -// DeadPropsHolder holds the dead properties of a resource. -// -// Dead properties are those properties that are explicitly defined. In -// comparison, live properties, such as DAV:getcontentlength, are implicitly -// defined by the underlying resource, and cannot be explicitly overridden or -// removed. See the Terminology section of -// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 -// -// There is a whitelist of the names of live properties. This package handles -// all live properties, and will only pass non-whitelisted names to the Patch -// method of DeadPropsHolder implementations. -type DeadPropsHolder interface { - // DeadProps returns a copy of the dead properties held. - DeadProps() (map[xml.Name]Property, error) - - // Patch patches the dead properties held. - // - // Patching is atomic; either all or no patches succeed. It returns (nil, - // non-nil) if an internal server error occurred, otherwise the Propstats - // collectively contain one Property for each proposed patch Property. If - // all patches succeed, Patch returns a slice of length one and a Propstat - // element with a 200 OK HTTP status code. If none succeed, for reasons - // other than an internal server error, no Propstat has status 200 OK. - // - // For more details on when various HTTP status codes apply, see - // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status - Patch([]Proppatch) ([]Propstat, error) -} - -// liveProps contains all supported, protected DAV: properties. -var liveProps = map[xml.Name]struct { - // findFn implements the propfind function of this property. If nil, - // it indicates a hidden property. - findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error) - // dir is true if the property applies to directories. 
- dir bool -}{ - {Space: "DAV:", Local: "resourcetype"}: { - findFn: findResourceType, - dir: true, - }, - {Space: "DAV:", Local: "displayname"}: { - findFn: findDisplayName, - dir: true, - }, - {Space: "DAV:", Local: "getcontentlength"}: { - findFn: findContentLength, - dir: false, - }, - {Space: "DAV:", Local: "getlastmodified"}: { - findFn: findLastModified, - // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified - // suggests that getlastmodified should only apply to GETable - // resources, and this package does not support GET on directories. - // - // Nonetheless, some WebDAV clients expect child directories to be - // sortable by getlastmodified date, so this value is true, not false. - // See golang.org/issue/15334. - dir: true, - }, - {Space: "DAV:", Local: "creationdate"}: { - findFn: nil, - dir: false, - }, - {Space: "DAV:", Local: "getcontentlanguage"}: { - findFn: nil, - dir: false, - }, - {Space: "DAV:", Local: "getcontenttype"}: { - findFn: findContentType, - dir: false, - }, - {Space: "DAV:", Local: "getetag"}: { - findFn: findETag, - // findETag implements ETag as the concatenated hex values of a file's - // modification time and size. This is not a reliable synchronization - // mechanism for directories, so we do not advertise getetag for DAV - // collections. - dir: false, - }, - - // TODO: The lockdiscovery property requires LockSystem to list the - // active locks on a resource. - {Space: "DAV:", Local: "lockdiscovery"}: {}, - {Space: "DAV:", Local: "supportedlock"}: { - findFn: findSupportedLock, - dir: true, - }, -} - -// TODO(nigeltao) merge props and allprop? - -// Props returns the status of the properties named pnames for resource name. -// -// Each Propstat has a unique status and each property name will only be part -// of one Propstat element. -func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { - f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) - if err != nil { - return nil, err - } - defer f.Close() - fi, err := f.Stat() - if err != nil { - return nil, err - } - isDir := fi.IsDir() - - var deadProps map[xml.Name]Property - if dph, ok := f.(DeadPropsHolder); ok { - deadProps, err = dph.DeadProps() - if err != nil { - return nil, err - } - } - - pstatOK := Propstat{Status: http.StatusOK} - pstatNotFound := Propstat{Status: http.StatusNotFound} - for _, pn := range pnames { - // If this file has dead properties, check if they contain pn. - if dp, ok := deadProps[pn]; ok { - pstatOK.Props = append(pstatOK.Props, dp) - continue - } - // Otherwise, it must either be a live property or we don't know it. - if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { - innerXML, err := prop.findFn(ctx, fs, ls, name, fi) - if err != nil { - return nil, err - } - pstatOK.Props = append(pstatOK.Props, Property{ - XMLName: pn, - InnerXML: []byte(innerXML), - }) - } else { - pstatNotFound.Props = append(pstatNotFound.Props, Property{ - XMLName: pn, - }) - } - } - return makePropstats(pstatOK, pstatNotFound), nil -} - -// Propnames returns the property names defined for resource name. 
-func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { - f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) - if err != nil { - return nil, err - } - defer f.Close() - fi, err := f.Stat() - if err != nil { - return nil, err - } - isDir := fi.IsDir() - - var deadProps map[xml.Name]Property - if dph, ok := f.(DeadPropsHolder); ok { - deadProps, err = dph.DeadProps() - if err != nil { - return nil, err - } - } - - pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps)) - for pn, prop := range liveProps { - if prop.findFn != nil && (prop.dir || !isDir) { - pnames = append(pnames, pn) - } - } - for pn := range deadProps { - pnames = append(pnames, pn) - } - return pnames, nil -} - -// Allprop returns the properties defined for resource name and the properties -// named in include. -// -// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined -// within the RFC plus dead properties. Other live properties should only be -// returned if they are named in 'include'. -// -// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND -func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { - pnames, err := propnames(ctx, fs, ls, name) - if err != nil { - return nil, err - } - // Add names from include if they are not already covered in pnames. - nameset := make(map[xml.Name]bool) - for _, pn := range pnames { - nameset[pn] = true - } - for _, pn := range include { - if !nameset[pn] { - pnames = append(pnames, pn) - } - } - return props(ctx, fs, ls, name, pnames) -} - -// Patch patches the properties of resource name. The return values are -// constrained in the same manner as DeadPropsHolder.Patch. -func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { - conflict := false -loop: - for _, patch := range patches { - for _, p := range patch.Props { - if _, ok := liveProps[p.XMLName]; ok { - conflict = true - break loop - } - } - } - if conflict { - pstatForbidden := Propstat{ - Status: http.StatusForbidden, - XMLError: ``, - } - pstatFailedDep := Propstat{ - Status: StatusFailedDependency, - } - for _, patch := range patches { - for _, p := range patch.Props { - if _, ok := liveProps[p.XMLName]; ok { - pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName}) - } else { - pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName}) - } - } - } - return makePropstats(pstatForbidden, pstatFailedDep), nil - } - - f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0) - if err != nil { - return nil, err - } - defer f.Close() - if dph, ok := f.(DeadPropsHolder); ok { - ret, err := dph.Patch(patches) - if err != nil { - return nil, err - } - // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that - // "The contents of the prop XML element must only list the names of - // properties to which the result in the status element applies." - for _, pstat := range ret { - for i, p := range pstat.Props { - pstat.Props[i] = Property{XMLName: p.XMLName} - } - } - return ret, nil - } - // The file doesn't implement the optional DeadPropsHolder interface, so - // all patches are forbidden. 
- pstat := Propstat{Status: http.StatusForbidden} - for _, patch := range patches { - for _, p := range patch.Props { - pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) - } - } - return []Propstat{pstat}, nil -} - -func escapeXML(s string) string { - for i := 0; i < len(s); i++ { - // As an optimization, if s contains only ASCII letters, digits or a - // few special characters, the escaped value is s itself and we don't - // need to allocate a buffer and convert between string and []byte. - switch c := s[i]; { - case c == ' ' || c == '_' || - ('+' <= c && c <= '9') || // Digits as well as + , - . and / - ('A' <= c && c <= 'Z') || - ('a' <= c && c <= 'z'): - continue - } - // Otherwise, go through the full escaping process. - var buf bytes.Buffer - xml.EscapeText(&buf, []byte(s)) - return buf.String() - } - return s -} - -func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - if fi.IsDir() { - return ``, nil - } - return "", nil -} - -func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - if slashClean(name) == "/" { - // Hide the real name of a possibly prefixed root directory. - return "", nil - } - return escapeXML(fi.Name()), nil -} - -func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - return strconv.FormatInt(fi.Size(), 10), nil -} - -func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - return fi.ModTime().Format(http.TimeFormat), nil -} - -func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) - if err != nil { - return "", err - } - defer f.Close() - // This implementation is based on serveContent's code in the standard net/http package. - ctype := mime.TypeByExtension(filepath.Ext(name)) - if ctype != "" { - return ctype, nil - } - // Read a chunk to decide between utf-8 text and binary. - var buf [512]byte - n, err := io.ReadFull(f, buf[:]) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return "", err - } - ctype = http.DetectContentType(buf[:n]) - // Rewind file. - _, err = f.Seek(0, os.SEEK_SET) - return ctype, err -} - -func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - // The Apache http 2.4 web server by default concatenates the - // modification time and size of a file. We replicate the heuristic - // with nanosecond granularity. - return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil -} - -func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { - return `` + - `` + - `` + - `` + - ``, nil -} diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go deleted file mode 100644 index 57d0e826..00000000 --- a/vendor/golang.org/x/net/webdav/prop_test.go +++ /dev/null @@ -1,613 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package webdav - -import ( - "encoding/xml" - "fmt" - "net/http" - "os" - "reflect" - "sort" - "testing" - - "golang.org/x/net/context" -) - -func TestMemPS(t *testing.T) { - ctx := context.Background() - // calcProps calculates the getlastmodified and getetag DAV: property - // values in pstats for resource name in file-system fs. - calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { - fi, err := fs.Stat(ctx, name) - if err != nil { - return err - } - for _, pst := range pstats { - for i, p := range pst.Props { - switch p.XMLName { - case xml.Name{Space: "DAV:", Local: "getlastmodified"}: - p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat)) - pst.Props[i] = p - case xml.Name{Space: "DAV:", Local: "getetag"}: - if fi.IsDir() { - continue - } - etag, err := findETag(ctx, fs, ls, name, fi) - if err != nil { - return err - } - p.InnerXML = []byte(etag) - pst.Props[i] = p - } - } - } - return nil - } - - const ( - lockEntry = `` + - `` + - `` + - `` + - `` - statForbiddenError = `` - ) - - type propOp struct { - op string - name string - pnames []xml.Name - patches []Proppatch - wantPnames []xml.Name - wantPropstats []Propstat - } - - testCases := []struct { - desc string - noDeadProps bool - buildfs []string - propOp []propOp - }{{ - desc: "propname", - buildfs: []string{"mkdir /dir", "touch /file"}, - propOp: []propOp{{ - op: "propname", - name: "/dir", - wantPnames: []xml.Name{ - {Space: "DAV:", Local: "resourcetype"}, - {Space: "DAV:", Local: "displayname"}, - {Space: "DAV:", Local: "supportedlock"}, - {Space: "DAV:", Local: "getlastmodified"}, - }, - }, { - op: "propname", - name: "/file", - wantPnames: []xml.Name{ - {Space: "DAV:", Local: "resourcetype"}, - {Space: "DAV:", Local: "displayname"}, - {Space: "DAV:", Local: "getcontentlength"}, - {Space: "DAV:", Local: "getlastmodified"}, - {Space: "DAV:", Local: "getcontenttype"}, - {Space: "DAV:", Local: "getetag"}, - {Space: "DAV:", Local: "supportedlock"}, - }, - }}, - }, { - desc: "allprop dir and file", - buildfs: []string{"mkdir /dir", "write /file foobarbaz"}, - propOp: []propOp{{ - op: "allprop", - name: "/dir", - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, - InnerXML: []byte(``), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, - InnerXML: []byte("dir"), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, - InnerXML: nil, // Calculated during test. - }, { - XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, - InnerXML: []byte(lockEntry), - }}, - }}, - }, { - op: "allprop", - name: "/file", - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, - InnerXML: []byte(""), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, - InnerXML: []byte("file"), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, - InnerXML: []byte("9"), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, - InnerXML: nil, // Calculated during test. - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, - InnerXML: []byte("text/plain; charset=utf-8"), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - InnerXML: nil, // Calculated during test. 
- }, { - XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, - InnerXML: []byte(lockEntry), - }}, - }}, - }, { - op: "allprop", - name: "/file", - pnames: []xml.Name{ - {"DAV:", "resourcetype"}, - {"foo", "bar"}, - }, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, - InnerXML: []byte(""), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, - InnerXML: []byte("file"), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, - InnerXML: []byte("9"), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, - InnerXML: nil, // Calculated during test. - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, - InnerXML: []byte("text/plain; charset=utf-8"), - }, { - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - InnerXML: nil, // Calculated during test. - }, { - XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, - InnerXML: []byte(lockEntry), - }}}, { - Status: http.StatusNotFound, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}}, - }, - }}, - }, { - desc: "propfind DAV:resourcetype", - buildfs: []string{"mkdir /dir", "touch /file"}, - propOp: []propOp{{ - op: "propfind", - name: "/dir", - pnames: []xml.Name{{"DAV:", "resourcetype"}}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, - InnerXML: []byte(``), - }}, - }}, - }, { - op: "propfind", - name: "/file", - pnames: []xml.Name{{"DAV:", "resourcetype"}}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, - InnerXML: []byte(""), - }}, - }}, - }}, - }, { - desc: "propfind unsupported DAV properties", - buildfs: []string{"mkdir /dir"}, - propOp: []propOp{{ - op: "propfind", - name: "/dir", - pnames: []xml.Name{{"DAV:", "getcontentlanguage"}}, - wantPropstats: []Propstat{{ - Status: http.StatusNotFound, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"}, - }}, - }}, - }, { - op: "propfind", - name: "/dir", - pnames: []xml.Name{{"DAV:", "creationdate"}}, - wantPropstats: []Propstat{{ - Status: http.StatusNotFound, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "creationdate"}, - }}, - }}, - }}, - }, { - desc: "propfind getetag for files but not for directories", - buildfs: []string{"mkdir /dir", "touch /file"}, - propOp: []propOp{{ - op: "propfind", - name: "/dir", - pnames: []xml.Name{{"DAV:", "getetag"}}, - wantPropstats: []Propstat{{ - Status: http.StatusNotFound, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - }}, - }}, - }, { - op: "propfind", - name: "/file", - pnames: []xml.Name{{"DAV:", "getetag"}}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - InnerXML: nil, // Calculated during test. 
- }}, - }}, - }}, - }, { - desc: "proppatch property on no-dead-properties file system", - buildfs: []string{"mkdir /dir"}, - noDeadProps: true, - propOp: []propOp{{ - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusForbidden, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }, { - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusForbidden, - XMLError: statForbiddenError, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - }}, - }}, - }}, - }, { - desc: "proppatch dead property", - buildfs: []string{"mkdir /dir"}, - propOp: []propOp{{ - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }, { - op: "propfind", - name: "/dir", - pnames: []xml.Name{{Space: "foo", Local: "bar"}}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }}, - }}, - }}, - }, { - desc: "proppatch dead property with failed dependency", - buildfs: []string{"mkdir /dir"}, - propOp: []propOp{{ - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }}, - }, { - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, - InnerXML: []byte("xxx"), - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusForbidden, - XMLError: statForbiddenError, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, - }}, - }, { - Status: StatusFailedDependency, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }, { - op: "propfind", - name: "/dir", - pnames: []xml.Name{{Space: "foo", Local: "bar"}}, - wantPropstats: []Propstat{{ - Status: http.StatusNotFound, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }}, - }, { - desc: "proppatch remove dead property", - buildfs: []string{"mkdir /dir"}, - propOp: []propOp{{ - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }, { - XMLName: xml.Name{Space: "spam", Local: "ham"}, - InnerXML: []byte("eggs"), - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }, { - XMLName: xml.Name{Space: "spam", Local: "ham"}, - }}, - }}, - }, { - op: "propfind", - name: "/dir", - pnames: []xml.Name{ - {Space: "foo", Local: "bar"}, - {Space: "spam", Local: "ham"}, - }, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }, { - XMLName: xml.Name{Space: "spam", Local: "ham"}, - InnerXML: []byte("eggs"), - }}, - }}, - }, { - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Remove: true, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - wantPropstats: 
[]Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }, { - op: "propfind", - name: "/dir", - pnames: []xml.Name{ - {Space: "foo", Local: "bar"}, - {Space: "spam", Local: "ham"}, - }, - wantPropstats: []Propstat{{ - Status: http.StatusNotFound, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }, { - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "spam", Local: "ham"}, - InnerXML: []byte("eggs"), - }}, - }}, - }}, - }, { - desc: "propname with dead property", - buildfs: []string{"touch /file"}, - propOp: []propOp{{ - op: "proppatch", - name: "/file", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }, { - op: "propname", - name: "/file", - wantPnames: []xml.Name{ - {Space: "DAV:", Local: "resourcetype"}, - {Space: "DAV:", Local: "displayname"}, - {Space: "DAV:", Local: "getcontentlength"}, - {Space: "DAV:", Local: "getlastmodified"}, - {Space: "DAV:", Local: "getcontenttype"}, - {Space: "DAV:", Local: "getetag"}, - {Space: "DAV:", Local: "supportedlock"}, - {Space: "foo", Local: "bar"}, - }, - }}, - }, { - desc: "proppatch remove unknown dead property", - buildfs: []string{"mkdir /dir"}, - propOp: []propOp{{ - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Remove: true, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }}, - }, { - desc: "bad: propfind unknown property", - buildfs: []string{"mkdir /dir"}, - propOp: []propOp{{ - op: "propfind", - name: "/dir", - pnames: []xml.Name{{"foo:", "bar"}}, - wantPropstats: []Propstat{{ - Status: http.StatusNotFound, - Props: []Property{{ - XMLName: xml.Name{Space: "foo:", Local: "bar"}, - }}, - }}, - }}, - }} - - for _, tc := range testCases { - fs, err := buildTestFS(tc.buildfs) - if err != nil { - t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) - } - if tc.noDeadProps { - fs = noDeadPropsFS{fs} - } - ls := NewMemLS() - for _, op := range tc.propOp { - desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) - if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { - t.Fatalf("%s: calcProps: %v", desc, err) - } - - // Call property system. - var propstats []Propstat - switch op.op { - case "propname": - pnames, err := propnames(ctx, fs, ls, op.name) - if err != nil { - t.Errorf("%s: got error %v, want nil", desc, err) - continue - } - sort.Sort(byXMLName(pnames)) - sort.Sort(byXMLName(op.wantPnames)) - if !reflect.DeepEqual(pnames, op.wantPnames) { - t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) - } - continue - case "allprop": - propstats, err = allprop(ctx, fs, ls, op.name, op.pnames) - case "propfind": - propstats, err = props(ctx, fs, ls, op.name, op.pnames) - case "proppatch": - propstats, err = patch(ctx, fs, ls, op.name, op.patches) - default: - t.Fatalf("%s: %s not implemented", desc, op.op) - } - if err != nil { - t.Errorf("%s: got error %v, want nil", desc, err) - continue - } - // Compare return values from allprop, propfind or proppatch. 
- for _, pst := range propstats { - sort.Sort(byPropname(pst.Props)) - } - for _, pst := range op.wantPropstats { - sort.Sort(byPropname(pst.Props)) - } - sort.Sort(byStatus(propstats)) - sort.Sort(byStatus(op.wantPropstats)) - if !reflect.DeepEqual(propstats, op.wantPropstats) { - t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) - } - } - } -} - -func cmpXMLName(a, b xml.Name) bool { - if a.Space != b.Space { - return a.Space < b.Space - } - return a.Local < b.Local -} - -type byXMLName []xml.Name - -func (b byXMLName) Len() int { return len(b) } -func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } - -type byPropname []Property - -func (b byPropname) Len() int { return len(b) } -func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } - -type byStatus []Propstat - -func (b byStatus) Len() int { return len(b) } -func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } - -type noDeadPropsFS struct { - FileSystem -} - -func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { - f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm) - if err != nil { - return nil, err - } - return noDeadPropsFile{f}, nil -} - -// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods -// provided by the underlying File implementation. -type noDeadPropsFile struct { - f File -} - -func (f noDeadPropsFile) Close() error { return f.f.Close() } -func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } -func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } -func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } -func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } -func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go deleted file mode 100644 index 7b56687f..00000000 --- a/vendor/golang.org/x/net/webdav/webdav.go +++ /dev/null @@ -1,702 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package webdav provides a WebDAV server implementation. -package webdav // import "golang.org/x/net/webdav" - -import ( - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path" - "strings" - "time" -) - -type Handler struct { - // Prefix is the URL path prefix to strip from WebDAV resource paths. - Prefix string - // FileSystem is the virtual file system. - FileSystem FileSystem - // LockSystem is the lock management system. - LockSystem LockSystem - // Logger is an optional error logger. If non-nil, it will be called - // for all HTTP requests. 
- Logger func(*http.Request, error) -} - -func (h *Handler) stripPrefix(p string) (string, int, error) { - if h.Prefix == "" { - return p, http.StatusOK, nil - } - if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { - return r, http.StatusOK, nil - } - return p, http.StatusNotFound, errPrefixMismatch -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - status, err := http.StatusBadRequest, errUnsupportedMethod - if h.FileSystem == nil { - status, err = http.StatusInternalServerError, errNoFileSystem - } else if h.LockSystem == nil { - status, err = http.StatusInternalServerError, errNoLockSystem - } else { - switch r.Method { - case "OPTIONS": - status, err = h.handleOptions(w, r) - case "GET", "HEAD", "POST": - status, err = h.handleGetHeadPost(w, r) - case "DELETE": - status, err = h.handleDelete(w, r) - case "PUT": - status, err = h.handlePut(w, r) - case "MKCOL": - status, err = h.handleMkcol(w, r) - case "COPY", "MOVE": - status, err = h.handleCopyMove(w, r) - case "LOCK": - status, err = h.handleLock(w, r) - case "UNLOCK": - status, err = h.handleUnlock(w, r) - case "PROPFIND": - status, err = h.handlePropfind(w, r) - case "PROPPATCH": - status, err = h.handleProppatch(w, r) - } - } - - if status != 0 { - w.WriteHeader(status) - if status != http.StatusNoContent { - w.Write([]byte(StatusText(status))) - } - } - if h.Logger != nil { - h.Logger(r, err) - } -} - -func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { - token, err = h.LockSystem.Create(now, LockDetails{ - Root: root, - Duration: infiniteTimeout, - ZeroDepth: true, - }) - if err != nil { - if err == ErrLocked { - return "", StatusLocked, err - } - return "", http.StatusInternalServerError, err - } - return token, 0, nil -} - -func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { - hdr := r.Header.Get("If") - if hdr == "" { - // An empty If header means that the client hasn't previously created locks. - // Even if this client doesn't care about locks, we still need to check that - // the resources aren't locked by another client, so we create temporary - // locks that would conflict with another client's locks. These temporary - // locks are unlocked at the end of the HTTP request. - now, srcToken, dstToken := time.Now(), "", "" - if src != "" { - srcToken, status, err = h.lock(now, src) - if err != nil { - return nil, status, err - } - } - if dst != "" { - dstToken, status, err = h.lock(now, dst) - if err != nil { - if srcToken != "" { - h.LockSystem.Unlock(now, srcToken) - } - return nil, status, err - } - } - - return func() { - if dstToken != "" { - h.LockSystem.Unlock(now, dstToken) - } - if srcToken != "" { - h.LockSystem.Unlock(now, srcToken) - } - }, 0, nil - } - - ih, ok := parseIfHeader(hdr) - if !ok { - return nil, http.StatusBadRequest, errInvalidIfHeader - } - // ih is a disjunction (OR) of ifLists, so any ifList will do. - for _, l := range ih.lists { - lsrc := l.resourceTag - if lsrc == "" { - lsrc = src - } else { - u, err := url.Parse(lsrc) - if err != nil { - continue - } - if u.Host != r.Host { - continue - } - lsrc, status, err = h.stripPrefix(u.Path) - if err != nil { - return nil, status, err - } - } - release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) 
- if err == ErrConfirmationFailed { - continue - } - if err != nil { - return nil, http.StatusInternalServerError, err - } - return release, 0, nil - } - // Section 10.4.1 says that "If this header is evaluated and all state lists - // fail, then the request must fail with a 412 (Precondition Failed) status." - // We follow the spec even though the cond_put_corrupt_token test case from - // the litmus test warns on seeing a 412 instead of a 423 (Locked). - return nil, http.StatusPreconditionFailed, ErrLocked -} - -func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - ctx := getContext(r) - allow := "OPTIONS, LOCK, PUT, MKCOL" - if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil { - if fi.IsDir() { - allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" - } else { - allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" - } - } - w.Header().Set("Allow", allow) - // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes - w.Header().Set("DAV", "1, 2") - // http://msdn.microsoft.com/en-au/library/cc250217.aspx - w.Header().Set("MS-Author-Via", "DAV") - return 0, nil -} - -func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - // TODO: check locks for read-only access?? - ctx := getContext(r) - f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0) - if err != nil { - return http.StatusNotFound, err - } - defer f.Close() - fi, err := f.Stat() - if err != nil { - return http.StatusNotFound, err - } - if fi.IsDir() { - return http.StatusMethodNotAllowed, nil - } - etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) - if err != nil { - return http.StatusInternalServerError, err - } - w.Header().Set("ETag", etag) - // Let ServeContent determine the Content-Type header. - http.ServeContent(w, r, reqPath, fi.ModTime(), f) - return 0, nil -} - -func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - release, status, err := h.confirmLocks(r, reqPath, "") - if err != nil { - return status, err - } - defer release() - - ctx := getContext(r) - - // TODO: return MultiStatus where appropriate. - - // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll - // returns nil (no error)." WebDAV semantics are that it should return a - // "404 Not Found". We therefore have to Stat before we RemoveAll. - if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { - if os.IsNotExist(err) { - return http.StatusNotFound, err - } - return http.StatusMethodNotAllowed, err - } - if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil { - return http.StatusMethodNotAllowed, err - } - return http.StatusNoContent, nil -} - -func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - release, status, err := h.confirmLocks(r, reqPath, "") - if err != nil { - return status, err - } - defer release() - // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' - // comments in http.checkEtag. 
- ctx := getContext(r) - - f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - return http.StatusNotFound, err - } - _, copyErr := io.Copy(f, r.Body) - fi, statErr := f.Stat() - closeErr := f.Close() - // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. - if copyErr != nil { - return http.StatusMethodNotAllowed, copyErr - } - if statErr != nil { - return http.StatusMethodNotAllowed, statErr - } - if closeErr != nil { - return http.StatusMethodNotAllowed, closeErr - } - etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) - if err != nil { - return http.StatusInternalServerError, err - } - w.Header().Set("ETag", etag) - return http.StatusCreated, nil -} - -func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - release, status, err := h.confirmLocks(r, reqPath, "") - if err != nil { - return status, err - } - defer release() - - ctx := getContext(r) - - if r.ContentLength > 0 { - return http.StatusUnsupportedMediaType, nil - } - if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil { - if os.IsNotExist(err) { - return http.StatusConflict, err - } - return http.StatusMethodNotAllowed, err - } - return http.StatusCreated, nil -} - -func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { - hdr := r.Header.Get("Destination") - if hdr == "" { - return http.StatusBadRequest, errInvalidDestination - } - u, err := url.Parse(hdr) - if err != nil { - return http.StatusBadRequest, errInvalidDestination - } - if u.Host != r.Host { - return http.StatusBadGateway, errInvalidDestination - } - - src, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - - dst, status, err := h.stripPrefix(u.Path) - if err != nil { - return status, err - } - - if dst == "" { - return http.StatusBadGateway, errInvalidDestination - } - if dst == src { - return http.StatusForbidden, errDestinationEqualsSource - } - - ctx := getContext(r) - - if r.Method == "COPY" { - // Section 7.5.1 says that a COPY only needs to lock the destination, - // not both destination and source. Strictly speaking, this is racy, - // even though a COPY doesn't modify the source, if a concurrent - // operation modifies the source. However, the litmus test explicitly - // checks that COPYing a locked-by-another source is OK. - release, status, err := h.confirmLocks(r, "", dst) - if err != nil { - return status, err - } - defer release() - - // Section 9.8.3 says that "The COPY method on a collection without a Depth - // header must act as if a Depth header with value "infinity" was included". - depth := infiniteDepth - if hdr := r.Header.Get("Depth"); hdr != "" { - depth = parseDepth(hdr) - if depth != 0 && depth != infiniteDepth { - // Section 9.8.3 says that "A client may submit a Depth header on a - // COPY on a collection with a value of "0" or "infinity"." - return http.StatusBadRequest, errInvalidDepth - } - } - return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) - } - - release, status, err := h.confirmLocks(r, src, dst) - if err != nil { - return status, err - } - defer release() - - // Section 9.9.2 says that "The MOVE method on a collection must act as if - // a "Depth: infinity" header was used on it. 
A client must not submit a - // Depth header on a MOVE on a collection with any value but "infinity"." - if hdr := r.Header.Get("Depth"); hdr != "" { - if parseDepth(hdr) != infiniteDepth { - return http.StatusBadRequest, errInvalidDepth - } - } - return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") -} - -func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { - duration, err := parseTimeout(r.Header.Get("Timeout")) - if err != nil { - return http.StatusBadRequest, err - } - li, status, err := readLockInfo(r.Body) - if err != nil { - return status, err - } - - ctx := getContext(r) - token, ld, now, created := "", LockDetails{}, time.Now(), false - if li == (lockInfo{}) { - // An empty lockInfo means to refresh the lock. - ih, ok := parseIfHeader(r.Header.Get("If")) - if !ok { - return http.StatusBadRequest, errInvalidIfHeader - } - if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { - token = ih.lists[0].conditions[0].Token - } - if token == "" { - return http.StatusBadRequest, errInvalidLockToken - } - ld, err = h.LockSystem.Refresh(now, token, duration) - if err != nil { - if err == ErrNoSuchLock { - return http.StatusPreconditionFailed, err - } - return http.StatusInternalServerError, err - } - - } else { - // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, - // then the request MUST act as if a "Depth:infinity" had been submitted." - depth := infiniteDepth - if hdr := r.Header.Get("Depth"); hdr != "" { - depth = parseDepth(hdr) - if depth != 0 && depth != infiniteDepth { - // Section 9.10.3 says that "Values other than 0 or infinity must not be - // used with the Depth header on a LOCK method". - return http.StatusBadRequest, errInvalidDepth - } - } - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - ld = LockDetails{ - Root: reqPath, - Duration: duration, - OwnerXML: li.Owner.InnerXML, - ZeroDepth: depth == 0, - } - token, err = h.LockSystem.Create(now, ld) - if err != nil { - if err == ErrLocked { - return StatusLocked, err - } - return http.StatusInternalServerError, err - } - defer func() { - if retErr != nil { - h.LockSystem.Unlock(now, token) - } - }() - - // Create the resource if it didn't previously exist. - if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { - f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - // TODO: detect missing intermediate dirs and return http.StatusConflict? - return http.StatusInternalServerError, err - } - f.Close() - created = true - } - - // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the - // Lock-Token value is a Coded-URL. We add angle brackets. - w.Header().Set("Lock-Token", "<"+token+">") - } - - w.Header().Set("Content-Type", "application/xml; charset=utf-8") - if created { - // This is "w.WriteHeader(http.StatusCreated)" and not "return - // http.StatusCreated, nil" because we write our own (XML) response to w - // and Handler.ServeHTTP would otherwise write "Created". - w.WriteHeader(http.StatusCreated) - } - writeLockInfo(w, token, ld) - return 0, nil -} - -func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { - // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the - // Lock-Token value is a Coded-URL. We strip its angle brackets. 
- t := r.Header.Get("Lock-Token") - if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { - return http.StatusBadRequest, errInvalidLockToken - } - t = t[1 : len(t)-1] - - switch err = h.LockSystem.Unlock(time.Now(), t); err { - case nil: - return http.StatusNoContent, err - case ErrForbidden: - return http.StatusForbidden, err - case ErrLocked: - return StatusLocked, err - case ErrNoSuchLock: - return http.StatusConflict, err - default: - return http.StatusInternalServerError, err - } -} - -func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - ctx := getContext(r) - fi, err := h.FileSystem.Stat(ctx, reqPath) - if err != nil { - if os.IsNotExist(err) { - return http.StatusNotFound, err - } - return http.StatusMethodNotAllowed, err - } - depth := infiniteDepth - if hdr := r.Header.Get("Depth"); hdr != "" { - depth = parseDepth(hdr) - if depth == invalidDepth { - return http.StatusBadRequest, errInvalidDepth - } - } - pf, status, err := readPropfind(r.Body) - if err != nil { - return status, err - } - - mw := multistatusWriter{w: w} - - walkFn := func(reqPath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - var pstats []Propstat - if pf.Propname != nil { - pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath) - if err != nil { - return err - } - pstat := Propstat{Status: http.StatusOK} - for _, xmlname := range pnames { - pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) - } - pstats = append(pstats, pstat) - } else if pf.Allprop != nil { - pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) - } else { - pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) - } - if err != nil { - return err - } - return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) - } - - walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn) - closeErr := mw.close() - if walkErr != nil { - return http.StatusInternalServerError, walkErr - } - if closeErr != nil { - return http.StatusInternalServerError, closeErr - } - return 0, nil -} - -func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { - reqPath, status, err := h.stripPrefix(r.URL.Path) - if err != nil { - return status, err - } - release, status, err := h.confirmLocks(r, reqPath, "") - if err != nil { - return status, err - } - defer release() - - ctx := getContext(r) - - if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { - if os.IsNotExist(err) { - return http.StatusNotFound, err - } - return http.StatusMethodNotAllowed, err - } - patches, status, err := readProppatch(r.Body) - if err != nil { - return status, err - } - pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches) - if err != nil { - return http.StatusInternalServerError, err - } - mw := multistatusWriter{w: w} - writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) - closeErr := mw.close() - if writeErr != nil { - return http.StatusInternalServerError, writeErr - } - if closeErr != nil { - return http.StatusInternalServerError, closeErr - } - return 0, nil -} - -func makePropstatResponse(href string, pstats []Propstat) *response { - resp := response{ - Href: []string{(&url.URL{Path: href}).EscapedPath()}, - Propstat: make([]propstat, 0, len(pstats)), - } - for _, p := range pstats { - var xmlErr *xmlError - if p.XMLError != "" { - xmlErr = &xmlError{InnerXML: 
[]byte(p.XMLError)} - } - resp.Propstat = append(resp.Propstat, propstat{ - Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), - Prop: p.Props, - ResponseDescription: p.ResponseDescription, - Error: xmlErr, - }) - } - return &resp -} - -const ( - infiniteDepth = -1 - invalidDepth = -2 -) - -// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and -// infiniteDepth. Parsing any other string returns invalidDepth. -// -// Different WebDAV methods have further constraints on valid depths: -// - PROPFIND has no further restrictions, as per section 9.1. -// - COPY accepts only "0" or "infinity", as per section 9.8.3. -// - MOVE accepts only "infinity", as per section 9.9.2. -// - LOCK accepts only "0" or "infinity", as per section 9.10.3. -// These constraints are enforced by the handleXxx methods. -func parseDepth(s string) int { - switch s { - case "0": - return 0 - case "1": - return 1 - case "infinity": - return infiniteDepth - } - return invalidDepth -} - -// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 -const ( - StatusMulti = 207 - StatusUnprocessableEntity = 422 - StatusLocked = 423 - StatusFailedDependency = 424 - StatusInsufficientStorage = 507 -) - -func StatusText(code int) string { - switch code { - case StatusMulti: - return "Multi-Status" - case StatusUnprocessableEntity: - return "Unprocessable Entity" - case StatusLocked: - return "Locked" - case StatusFailedDependency: - return "Failed Dependency" - case StatusInsufficientStorage: - return "Insufficient Storage" - } - return http.StatusText(code) -} - -var ( - errDestinationEqualsSource = errors.New("webdav: destination equals source") - errDirectoryNotEmpty = errors.New("webdav: directory not empty") - errInvalidDepth = errors.New("webdav: invalid depth") - errInvalidDestination = errors.New("webdav: invalid destination") - errInvalidIfHeader = errors.New("webdav: invalid If header") - errInvalidLockInfo = errors.New("webdav: invalid lock info") - errInvalidLockToken = errors.New("webdav: invalid lock token") - errInvalidPropfind = errors.New("webdav: invalid propfind") - errInvalidProppatch = errors.New("webdav: invalid proppatch") - errInvalidResponse = errors.New("webdav: invalid response") - errInvalidTimeout = errors.New("webdav: invalid timeout") - errNoFileSystem = errors.New("webdav: no file system") - errNoLockSystem = errors.New("webdav: no lock system") - errNotADirectory = errors.New("webdav: not a directory") - errPrefixMismatch = errors.New("webdav: prefix mismatch") - errRecursionTooDeep = errors.New("webdav: recursion too deep") - errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") - errUnsupportedMethod = errors.New("webdav: unsupported method") -) diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go deleted file mode 100644 index 25e0d542..00000000 --- a/vendor/golang.org/x/net/webdav/webdav_test.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package webdav - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "regexp" - "sort" - "strings" - "testing" - - "golang.org/x/net/context" -) - -// TODO: add tests to check XML responses with the expected prefix path -func TestPrefix(t *testing.T) { - const dst, blah = "Destination", "blah blah blah" - - // createLockBody comes from the example in Section 9.10.7. - const createLockBody = ` - - - - - http://example.org/~ejw/contact.html - - - ` - - do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { - var bodyReader io.Reader - if body != "" { - bodyReader = strings.NewReader(body) - } - req, err := http.NewRequest(method, urlStr, bodyReader) - if err != nil { - return nil, err - } - for len(headers) >= 2 { - req.Header.Add(headers[0], headers[1]) - headers = headers[2:] - } - res, err := http.DefaultTransport.RoundTrip(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != wantStatusCode { - return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) - } - return res.Header, nil - } - - prefixes := []string{ - "/", - "/a/", - "/a/b/", - "/a/b/c/", - } - ctx := context.Background() - for _, prefix := range prefixes { - fs := NewMemFS() - h := &Handler{ - FileSystem: fs, - LockSystem: NewMemLS(), - } - mux := http.NewServeMux() - if prefix != "/" { - h.Prefix = prefix - } - mux.Handle(prefix, h) - srv := httptest.NewServer(mux) - defer srv.Close() - - // The script is: - // MKCOL /a - // MKCOL /a/b - // PUT /a/b/c - // COPY /a/b/c /a/b/d - // MKCOL /a/b/e - // MOVE /a/b/d /a/b/e/f - // LOCK /a/b/e/g - // PUT /a/b/e/g - // which should yield the (possibly stripped) filenames /a/b/c, - // /a/b/e/f and /a/b/e/g, plus their parent directories. 
- - wantA := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusMovedPermanently, - "/a/b/": http.StatusNotFound, - "/a/b/c/": http.StatusNotFound, - }[prefix] - if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil { - t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) - continue - } - - wantB := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusCreated, - "/a/b/": http.StatusMovedPermanently, - "/a/b/c/": http.StatusNotFound, - }[prefix] - if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil { - t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) - continue - } - - wantC := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusCreated, - "/a/b/": http.StatusCreated, - "/a/b/c/": http.StatusMovedPermanently, - }[prefix] - if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil { - t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) - continue - } - - wantD := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusCreated, - "/a/b/": http.StatusCreated, - "/a/b/c/": http.StatusMovedPermanently, - }[prefix] - if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil { - t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) - continue - } - - wantE := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusCreated, - "/a/b/": http.StatusCreated, - "/a/b/c/": http.StatusNotFound, - }[prefix] - if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil { - t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) - continue - } - - wantF := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusCreated, - "/a/b/": http.StatusCreated, - "/a/b/c/": http.StatusNotFound, - }[prefix] - if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil { - t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) - continue - } - - var lockToken string - wantG := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusCreated, - "/a/b/": http.StatusCreated, - "/a/b/c/": http.StatusNotFound, - }[prefix] - if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil { - t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err) - continue - } else { - lockToken = h.Get("Lock-Token") - } - - ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken) - wantH := map[string]int{ - "/": http.StatusCreated, - "/a/": http.StatusCreated, - "/a/b/": http.StatusCreated, - "/a/b/c/": http.StatusNotFound, - }[prefix] - if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil { - t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err) - continue - } - - got, err := find(ctx, nil, fs, "/") - if err != nil { - t.Errorf("prefix=%-9q find: %v", prefix, err) - continue - } - sort.Strings(got) - want := map[string][]string{ - "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"}, - "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"}, - "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"}, - "/a/b/c/": {"/"}, - }[prefix] - if !reflect.DeepEqual(got, want) { - t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) - continue - } - } -} - -func TestEscapeXML(t *testing.T) { - // These test cases aren't exhaustive, and there is more than one way to - // escape e.g. a quot (as """ or """) or an apos. We presume that - // the encoding/xml package tests xml.EscapeText more thoroughly. 
This test - // here is just a sanity check for this package's escapeXML function, and - // its attempt to provide a fast path (and avoid a bytes.Buffer allocation) - // when escaping filenames is obviously a no-op. - testCases := map[string]string{ - "": "", - " ": " ", - "&": "&", - "*": "*", - "+": "+", - ",": ",", - "-": "-", - ".": ".", - "/": "/", - "0": "0", - "9": "9", - ":": ":", - "<": "<", - ">": ">", - "A": "A", - "_": "_", - "a": "a", - "~": "~", - "\u0201": "\u0201", - "&": "&amp;", - "foo&baz": "foo&<b/ar>baz", - } - - for in, want := range testCases { - if got := escapeXML(in); got != want { - t.Errorf("in=%q: got %q, want %q", in, got, want) - } - } -} - -func TestFilenameEscape(t *testing.T) { - hrefRe := regexp.MustCompile(`([^<]*)`) - displayNameRe := regexp.MustCompile(`([^<]*)`) - do := func(method, urlStr string) (string, string, error) { - req, err := http.NewRequest(method, urlStr, nil) - if err != nil { - return "", "", err - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - - b, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - hrefMatch := hrefRe.FindStringSubmatch(string(b)) - if len(hrefMatch) != 2 { - return "", "", errors.New("D:href not found") - } - displayNameMatch := displayNameRe.FindStringSubmatch(string(b)) - if len(displayNameMatch) != 2 { - return "", "", errors.New("D:displayname not found") - } - - return hrefMatch[1], displayNameMatch[1], nil - } - - testCases := []struct { - name, wantHref, wantDisplayName string - }{{ - name: `/foo%bar`, - wantHref: `/foo%25bar`, - wantDisplayName: `foo%bar`, - }, { - name: `/ã“ã‚“ã«ã¡ã‚世界`, - wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, - wantDisplayName: `ã“ã‚“ã«ã¡ã‚世界`, - }, { - name: `/Program Files/`, - wantHref: `/Program%20Files`, - wantDisplayName: `Program Files`, - }, { - name: `/go+lang`, - wantHref: `/go+lang`, - wantDisplayName: `go+lang`, - }, { - name: `/go&lang`, - wantHref: `/go&lang`, - wantDisplayName: `go&lang`, - }, { - name: `/goexclusive"` - Shared *struct{} `xml:"lockscope>shared"` - Write *struct{} `xml:"locktype>write"` - Owner owner `xml:"owner"` -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner -type owner struct { - InnerXML string `xml:",innerxml"` -} - -func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { - c := &countingReader{r: r} - if err = ixml.NewDecoder(c).Decode(&li); err != nil { - if err == io.EOF { - if c.n == 0 { - // An empty body means to refresh the lock. - // http://www.webdav.org/specs/rfc4918.html#refreshing-locks - return lockInfo{}, 0, nil - } - err = errInvalidLockInfo - } - return lockInfo{}, http.StatusBadRequest, err - } - // We only support exclusive (non-shared) write locks. In practice, these are - // the only types of locks that seem to matter. 
- if li.Exclusive == nil || li.Shared != nil || li.Write == nil { - return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo - } - return li, 0, nil -} - -type countingReader struct { - n int - r io.Reader -} - -func (c *countingReader) Read(p []byte) (int, error) { - n, err := c.r.Read(p) - c.n += n - return n, err -} - -func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { - depth := "infinity" - if ld.ZeroDepth { - depth = "0" - } - timeout := ld.Duration / time.Second - return fmt.Fprintf(w, "\n"+ - "\n"+ - " \n"+ - " \n"+ - " %s\n"+ - " %s\n"+ - " Second-%d\n"+ - " %s\n"+ - " %s\n"+ - "", - depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root), - ) -} - -func escape(s string) string { - for i := 0; i < len(s); i++ { - switch s[i] { - case '"', '&', '\'', '<', '>': - b := bytes.NewBuffer(nil) - ixml.EscapeText(b, []byte(s)) - return b.String() - } - } - return s -} - -// Next returns the next token, if any, in the XML stream of d. -// RFC 4918 requires to ignore comments, processing instructions -// and directives. -// http://www.webdav.org/specs/rfc4918.html#property_values -// http://www.webdav.org/specs/rfc4918.html#xml-extensibility -func next(d *ixml.Decoder) (ixml.Token, error) { - for { - t, err := d.Token() - if err != nil { - return t, err - } - switch t.(type) { - case ixml.Comment, ixml.Directive, ixml.ProcInst: - continue - default: - return t, nil - } - } -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) -type propfindProps []xml.Name - -// UnmarshalXML appends the property names enclosed within start to pn. -// -// It returns an error if start does not contain any properties or if -// properties contain values. Character data between properties is ignored. -func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { - for { - t, err := next(d) - if err != nil { - return err - } - switch t.(type) { - case ixml.EndElement: - if len(*pn) == 0 { - return fmt.Errorf("%s must not be empty", start.Name.Local) - } - return nil - case ixml.StartElement: - name := t.(ixml.StartElement).Name - t, err = next(d) - if err != nil { - return err - } - if _, ok := t.(ixml.EndElement); !ok { - return fmt.Errorf("unexpected token %T", t) - } - *pn = append(*pn, xml.Name(name)) - } - } -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind -type propfind struct { - XMLName ixml.Name `xml:"DAV: propfind"` - Allprop *struct{} `xml:"DAV: allprop"` - Propname *struct{} `xml:"DAV: propname"` - Prop propfindProps `xml:"DAV: prop"` - Include propfindProps `xml:"DAV: include"` -} - -func readPropfind(r io.Reader) (pf propfind, status int, err error) { - c := countingReader{r: r} - if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { - if err == io.EOF { - if c.n == 0 { - // An empty body means to propfind allprop. 
- // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND - return propfind{Allprop: new(struct{})}, 0, nil - } - err = errInvalidPropfind - } - return propfind{}, http.StatusBadRequest, err - } - - if pf.Allprop == nil && pf.Include != nil { - return propfind{}, http.StatusBadRequest, errInvalidPropfind - } - if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { - return propfind{}, http.StatusBadRequest, errInvalidPropfind - } - if pf.Prop != nil && pf.Propname != nil { - return propfind{}, http.StatusBadRequest, errInvalidPropfind - } - if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { - return propfind{}, http.StatusBadRequest, errInvalidPropfind - } - return pf, 0, nil -} - -// Property represents a single DAV resource property as defined in RFC 4918. -// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties -type Property struct { - // XMLName is the fully qualified name that identifies this property. - XMLName xml.Name - - // Lang is an optional xml:lang attribute. - Lang string `xml:"xml:lang,attr,omitempty"` - - // InnerXML contains the XML representation of the property value. - // See http://www.webdav.org/specs/rfc4918.html#property_values - // - // Property values of complex type or mixed-content must have fully - // expanded XML namespaces or be self-contained with according - // XML namespace declarations. They must not rely on any XML - // namespace declarations within the scope of the XML document, - // even including the DAV: namespace. - InnerXML []byte `xml:",innerxml"` -} - -// ixmlProperty is the same as the Property type except it holds an ixml.Name -// instead of an xml.Name. -type ixmlProperty struct { - XMLName ixml.Name - Lang string `xml:"xml:lang,attr,omitempty"` - InnerXML []byte `xml:",innerxml"` -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error -// See multistatusWriter for the "D:" namespace prefix. -type xmlError struct { - XMLName ixml.Name `xml:"D:error"` - InnerXML []byte `xml:",innerxml"` -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat -// See multistatusWriter for the "D:" namespace prefix. -type propstat struct { - Prop []Property `xml:"D:prop>_ignored_"` - Status string `xml:"D:status"` - Error *xmlError `xml:"D:error"` - ResponseDescription string `xml:"D:responsedescription,omitempty"` -} - -// ixmlPropstat is the same as the propstat type except it holds an ixml.Name -// instead of an xml.Name. -type ixmlPropstat struct { - Prop []ixmlProperty `xml:"D:prop>_ignored_"` - Status string `xml:"D:status"` - Error *xmlError `xml:"D:error"` - ResponseDescription string `xml:"D:responsedescription,omitempty"` -} - -// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace -// before encoding. See multistatusWriter. -func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { - // Convert from a propstat to an ixmlPropstat. - ixmlPs := ixmlPropstat{ - Prop: make([]ixmlProperty, len(ps.Prop)), - Status: ps.Status, - Error: ps.Error, - ResponseDescription: ps.ResponseDescription, - } - for k, prop := range ps.Prop { - ixmlPs.Prop[k] = ixmlProperty{ - XMLName: ixml.Name(prop.XMLName), - Lang: prop.Lang, - InnerXML: prop.InnerXML, - } - } - - for k, prop := range ixmlPs.Prop { - if prop.XMLName.Space == "DAV:" { - prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} - ixmlPs.Prop[k] = prop - } - } - // Distinct type to avoid infinite recursion of MarshalXML. 
- type newpropstat ixmlPropstat - return e.EncodeElement(newpropstat(ixmlPs), start) -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response -// See multistatusWriter for the "D:" namespace prefix. -type response struct { - XMLName ixml.Name `xml:"D:response"` - Href []string `xml:"D:href"` - Propstat []propstat `xml:"D:propstat"` - Status string `xml:"D:status,omitempty"` - Error *xmlError `xml:"D:error"` - ResponseDescription string `xml:"D:responsedescription,omitempty"` -} - -// MultistatusWriter marshals one or more Responses into a XML -// multistatus response. -// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus -// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as -// "DAV:" on this element, is prepended on the nested response, as well as on all -// its nested elements. All property names in the DAV: namespace are prefixed as -// well. This is because some versions of Mini-Redirector (on windows 7) ignore -// elements with a default namespace (no prefixed namespace). A less intrusive fix -// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 -type multistatusWriter struct { - // ResponseDescription contains the optional responsedescription - // of the multistatus XML element. Only the latest content before - // close will be emitted. Empty response descriptions are not - // written. - responseDescription string - - w http.ResponseWriter - enc *ixml.Encoder -} - -// Write validates and emits a DAV response as part of a multistatus response -// element. -// -// It sets the HTTP status code of its underlying http.ResponseWriter to 207 -// (Multi-Status) and populates the Content-Type header. If r is the -// first, valid response to be written, Write prepends the XML representation -// of r with a multistatus tag. Callers must call close after the last response -// has been written. -func (w *multistatusWriter) write(r *response) error { - switch len(r.Href) { - case 0: - return errInvalidResponse - case 1: - if len(r.Propstat) > 0 != (r.Status == "") { - return errInvalidResponse - } - default: - if len(r.Propstat) > 0 || r.Status == "" { - return errInvalidResponse - } - } - err := w.writeHeader() - if err != nil { - return err - } - return w.enc.Encode(r) -} - -// writeHeader writes a XML multistatus start element on w's underlying -// http.ResponseWriter and returns the result of the write operation. -// After the first write attempt, writeHeader becomes a no-op. -func (w *multistatusWriter) writeHeader() error { - if w.enc != nil { - return nil - } - w.w.Header().Add("Content-Type", "text/xml; charset=utf-8") - w.w.WriteHeader(StatusMulti) - _, err := fmt.Fprintf(w.w, ``) - if err != nil { - return err - } - w.enc = ixml.NewEncoder(w.w) - return w.enc.EncodeToken(ixml.StartElement{ - Name: ixml.Name{ - Space: "DAV:", - Local: "multistatus", - }, - Attr: []ixml.Attr{{ - Name: ixml.Name{Space: "xmlns", Local: "D"}, - Value: "DAV:", - }}, - }) -} - -// Close completes the marshalling of the multistatus response. It returns -// an error if the multistatus response could not be completed. If both the -// return value and field enc of w are nil, then no multistatus response has -// been written. 
-func (w *multistatusWriter) close() error { - if w.enc == nil { - return nil - } - var end []ixml.Token - if w.responseDescription != "" { - name := ixml.Name{Space: "DAV:", Local: "responsedescription"} - end = append(end, - ixml.StartElement{Name: name}, - ixml.CharData(w.responseDescription), - ixml.EndElement{Name: name}, - ) - } - end = append(end, ixml.EndElement{ - Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, - }) - for _, t := range end { - err := w.enc.EncodeToken(t) - if err != nil { - return err - } - } - return w.enc.Flush() -} - -var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} - -func xmlLang(s ixml.StartElement, d string) string { - for _, attr := range s.Attr { - if attr.Name == xmlLangName { - return attr.Value - } - } - return d -} - -type xmlValue []byte - -func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { - // The XML value of a property can be arbitrary, mixed-content XML. - // To make sure that the unmarshalled value contains all required - // namespaces, we encode all the property value XML tokens into a - // buffer. This forces the encoder to redeclare any used namespaces. - var b bytes.Buffer - e := ixml.NewEncoder(&b) - for { - t, err := next(d) - if err != nil { - return err - } - if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { - break - } - if err = e.EncodeToken(t); err != nil { - return err - } - } - err := e.Flush() - if err != nil { - return err - } - *v = b.Bytes() - return nil -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) -type proppatchProps []Property - -// UnmarshalXML appends the property names and values enclosed within start -// to ps. -// -// An xml:lang attribute that is defined either on the DAV:prop or property -// name XML element is propagated to the property's Lang field. -// -// UnmarshalXML returns an error if start does not contain any properties or if -// property values contain syntactically incorrect XML. -func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { - lang := xmlLang(start, "") - for { - t, err := next(d) - if err != nil { - return err - } - switch elem := t.(type) { - case ixml.EndElement: - if len(*ps) == 0 { - return fmt.Errorf("%s must not be empty", start.Name.Local) - } - return nil - case ixml.StartElement: - p := Property{ - XMLName: xml.Name(t.(ixml.StartElement).Name), - Lang: xmlLang(t.(ixml.StartElement), lang), - } - err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) - if err != nil { - return err - } - *ps = append(*ps, p) - } - } -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove -type setRemove struct { - XMLName ixml.Name - Lang string `xml:"xml:lang,attr,omitempty"` - Prop proppatchProps `xml:"DAV: prop"` -} - -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate -type propertyupdate struct { - XMLName ixml.Name `xml:"DAV: propertyupdate"` - Lang string `xml:"xml:lang,attr,omitempty"` - SetRemove []setRemove `xml:",any"` -} - -func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { - var pu propertyupdate - if err = ixml.NewDecoder(r).Decode(&pu); err != nil { - return nil, http.StatusBadRequest, err - } - for _, op := range pu.SetRemove { - remove := false - switch op.XMLName { - case ixml.Name{Space: "DAV:", Local: "set"}: - // No-op. 
- case ixml.Name{Space: "DAV:", Local: "remove"}: - for _, p := range op.Prop { - if len(p.InnerXML) > 0 { - return nil, http.StatusBadRequest, errInvalidProppatch - } - } - remove = true - default: - return nil, http.StatusBadRequest, errInvalidProppatch - } - patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) - } - return patches, 0, nil -} diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go deleted file mode 100644 index a3d9e1ed..00000000 --- a/vendor/golang.org/x/net/webdav/xml_test.go +++ /dev/null @@ -1,906 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webdav - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/http/httptest" - "reflect" - "sort" - "strings" - "testing" - - ixml "golang.org/x/net/webdav/internal/xml" -) - -func TestReadLockInfo(t *testing.T) { - // The "section x.y.z" test cases come from section x.y.z of the spec at - // http://www.webdav.org/specs/rfc4918.html - testCases := []struct { - desc string - input string - wantLI lockInfo - wantStatus int - }{{ - "bad: junk", - "xxx", - lockInfo{}, - http.StatusBadRequest, - }, { - "bad: invalid owner XML", - "" + - "\n" + - " \n" + - " \n" + - " \n" + - " no end tag \n" + - " \n" + - "", - lockInfo{}, - http.StatusBadRequest, - }, { - "bad: invalid UTF-8", - "" + - "\n" + - " \n" + - " \n" + - " \n" + - " \xff \n" + - " \n" + - "", - lockInfo{}, - http.StatusBadRequest, - }, { - "bad: unfinished XML #1", - "" + - "\n" + - " \n" + - " \n", - lockInfo{}, - http.StatusBadRequest, - }, { - "bad: unfinished XML #2", - "" + - "\n" + - " \n" + - " \n" + - " \n", - lockInfo{}, - http.StatusBadRequest, - }, { - "good: empty", - "", - lockInfo{}, - 0, - }, { - "good: plain-text owner", - "" + - "\n" + - " \n" + - " \n" + - " gopher\n" + - "", - lockInfo{ - XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, - Exclusive: new(struct{}), - Write: new(struct{}), - Owner: owner{ - InnerXML: "gopher", - }, - }, - 0, - }, { - "section 9.10.7", - "" + - "\n" + - " \n" + - " \n" + - " \n" + - " http://example.org/~ejw/contact.html\n" + - " \n" + - "", - lockInfo{ - XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, - Exclusive: new(struct{}), - Write: new(struct{}), - Owner: owner{ - InnerXML: "\n http://example.org/~ejw/contact.html\n ", - }, - }, - 0, - }} - - for _, tc := range testCases { - li, status, err := readLockInfo(strings.NewReader(tc.input)) - if tc.wantStatus != 0 { - if err == nil { - t.Errorf("%s: got nil error, want non-nil", tc.desc) - continue - } - } else if err != nil { - t.Errorf("%s: %v", tc.desc, err) - continue - } - if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { - t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", - tc.desc, li, status, tc.wantLI, tc.wantStatus) - continue - } - } -} - -func TestReadPropfind(t *testing.T) { - testCases := []struct { - desc string - input string - wantPF propfind - wantStatus int - }{{ - desc: "propfind: propname", - input: "" + - "\n" + - " \n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Propname: new(struct{}), - }, - }, { - desc: "propfind: empty body means allprop", - input: "", - wantPF: propfind{ - Allprop: new(struct{}), - }, - }, { - desc: "propfind: allprop", - input: "" + - "\n" + - " \n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: 
"propfind"}, - Allprop: new(struct{}), - }, - }, { - desc: "propfind: allprop followed by include", - input: "" + - "\n" + - " \n" + - " \n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Allprop: new(struct{}), - Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, - }, - }, { - desc: "propfind: include followed by allprop", - input: "" + - "\n" + - " \n" + - " \n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Allprop: new(struct{}), - Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, - }, - }, { - desc: "propfind: propfind", - input: "" + - "\n" + - " \n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, - }, - }, { - desc: "propfind: prop with ignored comments", - input: "" + - "\n" + - " \n" + - " \n" + - " \n" + - " \n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, - }, - }, { - desc: "propfind: propfind with ignored whitespace", - input: "" + - "\n" + - " \n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, - }, - }, { - desc: "propfind: propfind with ignored mixed-content", - input: "" + - "\n" + - " foobar\n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, - }, - }, { - desc: "propfind: propname with ignored element (section A.4)", - input: "" + - "\n" + - " \n" + - " *boss*\n" + - "", - wantPF: propfind{ - XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, - Propname: new(struct{}), - }, - }, { - desc: "propfind: bad: junk", - input: "xxx", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: propname and allprop (section A.3)", - input: "" + - "\n" + - " " + - " " + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: propname and prop", - input: "" + - "\n" + - " \n" + - " \n" + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: allprop and prop", - input: "" + - "\n" + - " \n" + - " \n" + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: empty propfind with ignored element (section A.4)", - input: "" + - "\n" + - " \n" + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: empty prop", - input: "" + - "\n" + - " \n" + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: prop with just chardata", - input: "" + - "\n" + - " foo\n" + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "bad: interrupted prop", - input: "" + - "\n" + - " \n", - wantStatus: http.StatusBadRequest, - }, { - desc: "bad: malformed end element prop", - input: "" + - "\n" + - " \n", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: property with chardata value", - input: "" + - "\n" + - " bar\n" + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: property with whitespace value", - input: "" + - "\n" + - " \n" + - "", - wantStatus: http.StatusBadRequest, - }, { - desc: "propfind: bad: include without allprop", - input: "" + - "\n" + - " \n" + - "", - wantStatus: http.StatusBadRequest, - }} - - for _, tc := range testCases { - pf, status, err := readPropfind(strings.NewReader(tc.input)) - if tc.wantStatus 
!= 0 { - if err == nil { - t.Errorf("%s: got nil error, want non-nil", tc.desc) - continue - } - } else if err != nil { - t.Errorf("%s: %v", tc.desc, err) - continue - } - if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { - t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", - tc.desc, pf, status, tc.wantPF, tc.wantStatus) - continue - } - } -} - -func TestMultistatusWriter(t *testing.T) { - ///The "section x.y.z" test cases come from section x.y.z of the spec at - // http://www.webdav.org/specs/rfc4918.html - testCases := []struct { - desc string - responses []response - respdesc string - writeHeader bool - wantXML string - wantCode int - wantErr error - }{{ - desc: "section 9.2.2 (failed dependency)", - responses: []response{{ - Href: []string{"http://example.com/foo"}, - Propstat: []propstat{{ - Prop: []Property{{ - XMLName: xml.Name{ - Space: "http://ns.example.com/", - Local: "Authors", - }, - }}, - Status: "HTTP/1.1 424 Failed Dependency", - }, { - Prop: []Property{{ - XMLName: xml.Name{ - Space: "http://ns.example.com/", - Local: "Copyright-Owner", - }, - }}, - Status: "HTTP/1.1 409 Conflict", - }}, - ResponseDescription: "Copyright Owner cannot be deleted or altered.", - }}, - wantXML: `` + - `` + - `` + - ` ` + - ` http://example.com/foo` + - ` ` + - ` ` + - ` ` + - ` ` + - ` HTTP/1.1 424 Failed Dependency` + - ` ` + - ` ` + - ` ` + - ` ` + - ` ` + - ` HTTP/1.1 409 Conflict` + - ` ` + - ` Copyright Owner cannot be deleted or altered.` + - `` + - ``, - wantCode: StatusMulti, - }, { - desc: "section 9.6.2 (lock-token-submitted)", - responses: []response{{ - Href: []string{"http://example.com/foo"}, - Status: "HTTP/1.1 423 Locked", - Error: &xmlError{ - InnerXML: []byte(``), - }, - }}, - wantXML: `` + - `` + - `` + - ` ` + - ` http://example.com/foo` + - ` HTTP/1.1 423 Locked` + - ` ` + - ` ` + - ``, - wantCode: StatusMulti, - }, { - desc: "section 9.1.3", - responses: []response{{ - Href: []string{"http://example.com/foo"}, - Propstat: []propstat{{ - Prop: []Property{{ - XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, - InnerXML: []byte(`` + - `` + - `Box type A` + - ``), - }, { - XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, - InnerXML: []byte(`` + - `` + - `J.J. Johnson` + - ``), - }}, - Status: "HTTP/1.1 200 OK", - }, { - Prop: []Property{{ - XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, - }, { - XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, - }}, - Status: "HTTP/1.1 403 Forbidden", - ResponseDescription: "The user does not have access to the DingALing property.", - }}, - }}, - respdesc: "There has been an access violation error.", - wantXML: `` + - `` + - `` + - ` ` + - ` http://example.com/foo` + - ` ` + - ` ` + - ` Box type A` + - ` J.J. 
Johnson` + - ` ` + - ` HTTP/1.1 200 OK` + - ` ` + - ` ` + - ` ` + - ` ` + - ` ` + - ` ` + - ` HTTP/1.1 403 Forbidden` + - ` The user does not have access to the DingALing property.` + - ` ` + - ` ` + - ` There has been an access violation error.` + - ``, - wantCode: StatusMulti, - }, { - desc: "no response written", - // default of http.responseWriter - wantCode: http.StatusOK, - }, { - desc: "no response written (with description)", - respdesc: "too bad", - // default of http.responseWriter - wantCode: http.StatusOK, - }, { - desc: "empty multistatus with header", - writeHeader: true, - wantXML: ``, - wantCode: StatusMulti, - }, { - desc: "bad: no href", - responses: []response{{ - Propstat: []propstat{{ - Prop: []Property{{ - XMLName: xml.Name{ - Space: "http://example.com/", - Local: "foo", - }, - }}, - Status: "HTTP/1.1 200 OK", - }}, - }}, - wantErr: errInvalidResponse, - // default of http.responseWriter - wantCode: http.StatusOK, - }, { - desc: "bad: multiple hrefs and no status", - responses: []response{{ - Href: []string{"http://example.com/foo", "http://example.com/bar"}, - }}, - wantErr: errInvalidResponse, - // default of http.responseWriter - wantCode: http.StatusOK, - }, { - desc: "bad: one href and no propstat", - responses: []response{{ - Href: []string{"http://example.com/foo"}, - }}, - wantErr: errInvalidResponse, - // default of http.responseWriter - wantCode: http.StatusOK, - }, { - desc: "bad: status with one href and propstat", - responses: []response{{ - Href: []string{"http://example.com/foo"}, - Propstat: []propstat{{ - Prop: []Property{{ - XMLName: xml.Name{ - Space: "http://example.com/", - Local: "foo", - }, - }}, - Status: "HTTP/1.1 200 OK", - }}, - Status: "HTTP/1.1 200 OK", - }}, - wantErr: errInvalidResponse, - // default of http.responseWriter - wantCode: http.StatusOK, - }, { - desc: "bad: multiple hrefs and propstat", - responses: []response{{ - Href: []string{ - "http://example.com/foo", - "http://example.com/bar", - }, - Propstat: []propstat{{ - Prop: []Property{{ - XMLName: xml.Name{ - Space: "http://example.com/", - Local: "foo", - }, - }}, - Status: "HTTP/1.1 200 OK", - }}, - }}, - wantErr: errInvalidResponse, - // default of http.responseWriter - wantCode: http.StatusOK, - }} - - n := xmlNormalizer{omitWhitespace: true} -loop: - for _, tc := range testCases { - rec := httptest.NewRecorder() - w := multistatusWriter{w: rec, responseDescription: tc.respdesc} - if tc.writeHeader { - if err := w.writeHeader(); err != nil { - t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) - continue - } - } - for _, r := range tc.responses { - if err := w.write(&r); err != nil { - if err != tc.wantErr { - t.Errorf("%s: got write error %v, want %v", - tc.desc, err, tc.wantErr) - } - continue loop - } - } - if err := w.close(); err != tc.wantErr { - t.Errorf("%s: got close error %v, want %v", - tc.desc, err, tc.wantErr) - continue - } - if rec.Code != tc.wantCode { - t.Errorf("%s: got HTTP status code %d, want %d\n", - tc.desc, rec.Code, tc.wantCode) - continue - } - gotXML := rec.Body.String() - eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) - if err != nil { - t.Errorf("%s: equalXML: %v", tc.desc, err) - continue - } - if !eq { - t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) - } - } -} - -func TestReadProppatch(t *testing.T) { - ppStr := func(pps []Proppatch) string { - var outer []string - for _, pp := range pps { - var inner []string - for _, p := range pp.Props { - inner = append(inner, 
fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", - p.XMLName, p.Lang, p.InnerXML)) - } - outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", - pp.Remove, strings.Join(inner, ", "))) - } - return "[" + strings.Join(outer, ", ") + "]" - } - - testCases := []struct { - desc string - input string - wantPP []Proppatch - wantStatus int - }{{ - desc: "proppatch: section 9.2 (with simple property value)", - input: `` + - `` + - `` + - ` ` + - ` somevalue` + - ` ` + - ` ` + - ` ` + - ` ` + - ``, - wantPP: []Proppatch{{ - Props: []Property{{ - xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, - "", - []byte(`somevalue`), - }}, - }, { - Remove: true, - Props: []Property{{ - xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, - "", - nil, - }}, - }}, - }, { - desc: "proppatch: lang attribute on prop", - input: `` + - `` + - `` + - ` ` + - ` ` + - ` ` + - ` ` + - ` ` + - ``, - wantPP: []Proppatch{{ - Props: []Property{{ - xml.Name{Space: "http://example.com/ns", Local: "foo"}, - "en", - nil, - }}, - }}, - }, { - desc: "bad: remove with value", - input: `` + - `` + - `` + - ` ` + - ` ` + - ` ` + - ` Jim Whitehead` + - ` ` + - ` ` + - ` ` + - ``, - wantStatus: http.StatusBadRequest, - }, { - desc: "bad: empty propertyupdate", - input: `` + - `` + - ``, - wantStatus: http.StatusBadRequest, - }, { - desc: "bad: empty prop", - input: `` + - `` + - `` + - ` ` + - ` ` + - ` ` + - ``, - wantStatus: http.StatusBadRequest, - }} - - for _, tc := range testCases { - pp, status, err := readProppatch(strings.NewReader(tc.input)) - if tc.wantStatus != 0 { - if err == nil { - t.Errorf("%s: got nil error, want non-nil", tc.desc) - continue - } - } else if err != nil { - t.Errorf("%s: %v", tc.desc, err) - continue - } - if status != tc.wantStatus { - t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) - continue - } - if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { - t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) - } - } -} - -func TestUnmarshalXMLValue(t *testing.T) { - testCases := []struct { - desc string - input string - wantVal string - }{{ - desc: "simple char data", - input: "foo", - wantVal: "foo", - }, { - desc: "empty element", - input: "", - wantVal: "", - }, { - desc: "preserve namespace", - input: ``, - wantVal: ``, - }, { - desc: "preserve root element namespace", - input: ``, - wantVal: ``, - }, { - desc: "preserve whitespace", - input: " \t ", - wantVal: " \t ", - }, { - desc: "preserve mixed content", - input: ` a `, - wantVal: ` a `, - }, { - desc: "section 9.2", - input: `` + - `` + - ` Jim Whitehead` + - ` Roy Fielding` + - ``, - wantVal: `` + - ` Jim Whitehead` + - ` Roy Fielding`, - }, { - desc: "section 4.3.1 (mixed content)", - input: `` + - `` + - ` Jane Doe` + - ` ` + - ` mailto:jane.doe@example.com` + - ` http://www.example.com` + - ` ` + - ` Jane has been working way too long on the` + - ` long-awaited revision of ]]>.` + - ` ` + - ``, - wantVal: `` + - ` Jane Doe` + - ` ` + - ` mailto:jane.doe@example.com` + - ` http://www.example.com` + - ` ` + - ` Jane has been working way too long on the` + - ` long-awaited revision of <RFC2518>.` + - ` `, - }} - - var n xmlNormalizer - for _, tc := range testCases { - d := ixml.NewDecoder(strings.NewReader(tc.input)) - var v xmlValue - if err := d.Decode(&v); err != nil { - t.Errorf("%s: got error %v, want nil", tc.desc, err) - continue - } - eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) - if err != nil { - 
t.Errorf("%s: equalXML: %v", tc.desc, err) - continue - } - if !eq { - t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) - } - } -} - -// xmlNormalizer normalizes XML. -type xmlNormalizer struct { - // omitWhitespace instructs to ignore whitespace between element tags. - omitWhitespace bool - // omitComments instructs to ignore XML comments. - omitComments bool -} - -// normalize writes the normalized XML content of r to w. It applies the -// following rules -// -// * Rename namespace prefixes according to an internal heuristic. -// * Remove unnecessary namespace declarations. -// * Sort attributes in XML start elements in lexical order of their -// fully qualified name. -// * Remove XML directives and processing instructions. -// * Remove CDATA between XML tags that only contains whitespace, if -// instructed to do so. -// * Remove comments, if instructed to do so. -// -func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { - d := ixml.NewDecoder(r) - e := ixml.NewEncoder(w) - for { - t, err := d.Token() - if err != nil { - if t == nil && err == io.EOF { - break - } - return err - } - switch val := t.(type) { - case ixml.Directive, ixml.ProcInst: - continue - case ixml.Comment: - if n.omitComments { - continue - } - case ixml.CharData: - if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { - continue - } - case ixml.StartElement: - start, _ := ixml.CopyToken(val).(ixml.StartElement) - attr := start.Attr[:0] - for _, a := range start.Attr { - if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { - continue - } - attr = append(attr, a) - } - sort.Sort(byName(attr)) - start.Attr = attr - t = start - } - err = e.EncodeToken(t) - if err != nil { - return err - } - } - return e.Flush() -} - -// equalXML tests for equality of the normalized XML contents of a and b. -func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { - var buf bytes.Buffer - if err := n.normalize(&buf, a); err != nil { - return false, err - } - normA := buf.String() - buf.Reset() - if err := n.normalize(&buf, b); err != nil { - return false, err - } - normB := buf.String() - return normA == normB, nil -} - -type byName []ixml.Attr - -func (a byName) Len() int { return len(a) } -func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byName) Less(i, j int) bool { - if a[i].Name.Space != a[j].Name.Space { - return a[i].Name.Space < a[j].Name.Space - } - return a[i].Name.Local < a[j].Name.Local -} diff --git a/vendor/golang.org/x/net/websocket/dial_test.go b/vendor/golang.org/x/net/websocket/dial_test.go deleted file mode 100644 index aa03e30d..00000000 --- a/vendor/golang.org/x/net/websocket/dial_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "crypto/tls" - "fmt" - "log" - "net" - "net/http/httptest" - "testing" - "time" -) - -// This test depend on Go 1.3+ because in earlier versions the Dialer won't be -// used in TLS connections and a timeout won't be triggered. 
-func TestDialConfigTLSWithDialer(t *testing.T) { - tlsServer := httptest.NewTLSServer(nil) - tlsServerAddr := tlsServer.Listener.Addr().String() - log.Print("Test TLS WebSocket server listening on ", tlsServerAddr) - defer tlsServer.Close() - config, _ := NewConfig(fmt.Sprintf("wss://%s/echo", tlsServerAddr), "http://localhost") - config.Dialer = &net.Dialer{ - Deadline: time.Now().Add(-time.Minute), - } - config.TlsConfig = &tls.Config{ - InsecureSkipVerify: true, - } - _, err := DialConfig(config) - dialerr, ok := err.(*DialError) - if !ok { - t.Fatalf("DialError expected, got %#v", err) - } - neterr, ok := dialerr.Err.(*net.OpError) - if !ok { - t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) - } - if !neterr.Timeout() { - t.Fatalf("expected timeout error, got %#v", neterr) - } -} diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go deleted file mode 100644 index 72bb9d48..00000000 --- a/vendor/golang.org/x/net/websocket/exampledial_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket_test - -import ( - "fmt" - "log" - - "golang.org/x/net/websocket" -) - -// This example demonstrates a trivial client. -func ExampleDial() { - origin := "http://localhost/" - url := "ws://localhost:12345/ws" - ws, err := websocket.Dial(url, "", origin) - if err != nil { - log.Fatal(err) - } - if _, err := ws.Write([]byte("hello, world!\n")); err != nil { - log.Fatal(err) - } - var msg = make([]byte, 512) - var n int - if n, err = ws.Read(msg); err != nil { - log.Fatal(err) - } - fmt.Printf("Received: %s.\n", msg[:n]) -} diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go deleted file mode 100644 index f22a98fc..00000000 --- a/vendor/golang.org/x/net/websocket/examplehandler_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket_test - -import ( - "io" - "net/http" - - "golang.org/x/net/websocket" -) - -// Echo the data received on the WebSocket. -func EchoServer(ws *websocket.Conn) { - io.Copy(ws, ws) -} - -// This example demonstrates a trivial echo server. -func ExampleHandler() { - http.Handle("/echo", websocket.Handler(EchoServer)) - err := http.ListenAndServe(":12345", nil) - if err != nil { - panic("ListenAndServe: " + err.Error()) - } -} diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go deleted file mode 100644 index 9504aa2d..00000000 --- a/vendor/golang.org/x/net/websocket/hybi_test.go +++ /dev/null @@ -1,608 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "testing" -) - -// Test the getNonceAccept function with values in -// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 -func TestSecWebSocketAccept(t *testing.T) { - nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") - expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") - accept, err := getNonceAccept(nonce) - if err != nil { - t.Errorf("getNonceAccept: returned error %v", err) - return - } - if !bytes.Equal(expected, accept) { - t.Errorf("getNonceAccept: expected %q got %q", expected, accept) - } -} - -func TestHybiClientHandshake(t *testing.T) { - type test struct { - url, host string - } - tests := []test{ - {"ws://server.example.com/chat", "server.example.com"}, - {"ws://127.0.0.1/chat", "127.0.0.1"}, - } - if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { - tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) - } - - for _, tt := range tests { - var b bytes.Buffer - bw := bufio.NewWriter(&b) - br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols -Upgrade: websocket -Connection: Upgrade -Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= -Sec-WebSocket-Protocol: chat - -`)) - var err error - var config Config - config.Location, err = url.ParseRequestURI(tt.url) - if err != nil { - t.Fatal("location url", err) - } - config.Origin, err = url.ParseRequestURI("http://example.com") - if err != nil { - t.Fatal("origin url", err) - } - config.Protocol = append(config.Protocol, "chat") - config.Protocol = append(config.Protocol, "superchat") - config.Version = ProtocolVersionHybi13 - config.handshakeData = map[string]string{ - "key": "dGhlIHNhbXBsZSBub25jZQ==", - } - if err := hybiClientHandshake(&config, br, bw); err != nil { - t.Fatal("handshake", err) - } - req, err := http.ReadRequest(bufio.NewReader(&b)) - if err != nil { - t.Fatal("read request", err) - } - if req.Method != "GET" { - t.Errorf("request method expected GET, but got %s", req.Method) - } - if req.URL.Path != "/chat" { - t.Errorf("request path expected /chat, but got %s", req.URL.Path) - } - if req.Proto != "HTTP/1.1" { - t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) - } - if req.Host != tt.host { - t.Errorf("request host expected %s, but got %s", tt.host, req.Host) - } - var expectedHeader = map[string]string{ - "Connection": "Upgrade", - "Upgrade": "websocket", - "Sec-Websocket-Key": config.handshakeData["key"], - "Origin": config.Origin.String(), - "Sec-Websocket-Protocol": "chat, superchat", - "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), - } - for k, v := range expectedHeader { - if req.Header.Get(k) != v { - t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) - } - } - } -} - -func TestHybiClientHandshakeWithHeader(t *testing.T) { - b := bytes.NewBuffer([]byte{}) - bw := bufio.NewWriter(b) - br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols -Upgrade: websocket -Connection: Upgrade -Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= -Sec-WebSocket-Protocol: chat - -`)) - var err error - config := new(Config) - config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") - if err != nil { - t.Fatal("location url", err) - } - config.Origin, err = url.ParseRequestURI("http://example.com") - if err != nil { - t.Fatal("origin url", err) - } - config.Protocol = append(config.Protocol, "chat") - config.Protocol = append(config.Protocol, "superchat") - 
config.Version = ProtocolVersionHybi13 - config.Header = http.Header(make(map[string][]string)) - config.Header.Add("User-Agent", "test") - - config.handshakeData = map[string]string{ - "key": "dGhlIHNhbXBsZSBub25jZQ==", - } - err = hybiClientHandshake(config, br, bw) - if err != nil { - t.Errorf("handshake failed: %v", err) - } - req, err := http.ReadRequest(bufio.NewReader(b)) - if err != nil { - t.Fatalf("read request: %v", err) - } - if req.Method != "GET" { - t.Errorf("request method expected GET, but got %q", req.Method) - } - if req.URL.Path != "/chat" { - t.Errorf("request path expected /chat, but got %q", req.URL.Path) - } - if req.Proto != "HTTP/1.1" { - t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) - } - if req.Host != "server.example.com" { - t.Errorf("request Host expected server.example.com, but got %v", req.Host) - } - var expectedHeader = map[string]string{ - "Connection": "Upgrade", - "Upgrade": "websocket", - "Sec-Websocket-Key": config.handshakeData["key"], - "Origin": config.Origin.String(), - "Sec-Websocket-Protocol": "chat, superchat", - "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), - "User-Agent": "test", - } - for k, v := range expectedHeader { - if req.Header.Get(k) != v { - t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) - } - } -} - -func TestHybiServerHandshake(t *testing.T) { - config := new(Config) - handshaker := &hybiServerHandshaker{Config: config} - br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 -Host: server.example.com -Upgrade: websocket -Connection: Upgrade -Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== -Origin: http://example.com -Sec-WebSocket-Protocol: chat, superchat -Sec-WebSocket-Version: 13 - -`)) - req, err := http.ReadRequest(br) - if err != nil { - t.Fatal("request", err) - } - code, err := handshaker.ReadHandshake(br, req) - if err != nil { - t.Errorf("handshake failed: %v", err) - } - if code != http.StatusSwitchingProtocols { - t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) - } - expectedProtocols := []string{"chat", "superchat"} - if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) { - t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol) - } - b := bytes.NewBuffer([]byte{}) - bw := bufio.NewWriter(b) - - config.Protocol = config.Protocol[:1] - - err = handshaker.AcceptHandshake(bw) - if err != nil { - t.Errorf("handshake response failed: %v", err) - } - expectedResponse := strings.Join([]string{ - "HTTP/1.1 101 Switching Protocols", - "Upgrade: websocket", - "Connection: Upgrade", - "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", - "Sec-WebSocket-Protocol: chat", - "", ""}, "\r\n") - - if b.String() != expectedResponse { - t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) - } -} - -func TestHybiServerHandshakeNoSubProtocol(t *testing.T) { - config := new(Config) - handshaker := &hybiServerHandshaker{Config: config} - br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 -Host: server.example.com -Upgrade: websocket -Connection: Upgrade -Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== -Origin: http://example.com -Sec-WebSocket-Version: 13 - -`)) - req, err := http.ReadRequest(br) - if err != nil { - t.Fatal("request", err) - } - code, err := handshaker.ReadHandshake(br, req) - if err != nil { - t.Errorf("handshake failed: %v", err) - } - if code != http.StatusSwitchingProtocols { - t.Errorf("status expected %q but got %q", 
http.StatusSwitchingProtocols, code) - } - if len(config.Protocol) != 0 { - t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol)) - } - b := bytes.NewBuffer([]byte{}) - bw := bufio.NewWriter(b) - - err = handshaker.AcceptHandshake(bw) - if err != nil { - t.Errorf("handshake response failed: %v", err) - } - expectedResponse := strings.Join([]string{ - "HTTP/1.1 101 Switching Protocols", - "Upgrade: websocket", - "Connection: Upgrade", - "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", - "", ""}, "\r\n") - - if b.String() != expectedResponse { - t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) - } -} - -func TestHybiServerHandshakeHybiBadVersion(t *testing.T) { - config := new(Config) - handshaker := &hybiServerHandshaker{Config: config} - br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 -Host: server.example.com -Upgrade: websocket -Connection: Upgrade -Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== -Sec-WebSocket-Origin: http://example.com -Sec-WebSocket-Protocol: chat, superchat -Sec-WebSocket-Version: 9 - -`)) - req, err := http.ReadRequest(br) - if err != nil { - t.Fatal("request", err) - } - code, err := handshaker.ReadHandshake(br, req) - if err != ErrBadWebSocketVersion { - t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err) - } - if code != http.StatusBadRequest { - t.Errorf("status expected %q but got %q", http.StatusBadRequest, code) - } -} - -func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) { - b := bytes.NewBuffer([]byte{}) - frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false} - w, _ := frameWriterFactory.NewFrameWriter(TextFrame) - w.(*hybiFrameWriter).header = frameHeader - _, err := w.Write(testPayload) - w.Close() - if err != nil { - t.Errorf("Write error %q", err) - } - var expectedFrame []byte - expectedFrame = append(expectedFrame, testHeader...) - expectedFrame = append(expectedFrame, testMaskedPayload...) 
- if !bytes.Equal(expectedFrame, b.Bytes()) { - t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes()) - } - frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)} - r, err := frameReaderFactory.NewFrameReader() - if err != nil { - t.Errorf("Read error %q", err) - } - if header := r.HeaderReader(); header == nil { - t.Errorf("no header") - } else { - actualHeader := make([]byte, r.Len()) - n, err := header.Read(actualHeader) - if err != nil { - t.Errorf("Read header error %q", err) - } else { - if n < len(testHeader) { - t.Errorf("header too short %q got %q", testHeader, actualHeader[:n]) - } - if !bytes.Equal(testHeader, actualHeader[:n]) { - t.Errorf("header expected %q got %q", testHeader, actualHeader[:n]) - } - } - } - if trailer := r.TrailerReader(); trailer != nil { - t.Errorf("unexpected trailer %q", trailer) - } - frame := r.(*hybiFrameReader) - if frameHeader.Fin != frame.header.Fin || - frameHeader.OpCode != frame.header.OpCode || - len(testPayload) != int(frame.header.Length) { - t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame) - } - payload := make([]byte, len(testPayload)) - _, err = r.Read(payload) - if err != nil && err != io.EOF { - t.Errorf("read %v", err) - } - if !bytes.Equal(testPayload, payload) { - t.Errorf("payload %q vs %q", testPayload, payload) - } -} - -func TestHybiShortTextFrame(t *testing.T) { - frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} - payload := []byte("hello") - testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader) - - payload = make([]byte, 125) - testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader) -} - -func TestHybiShortMaskedTextFrame(t *testing.T) { - frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame, - MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}} - payload := []byte("hello") - maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3} - header := []byte{0x81, 0x85} - header = append(header, frameHeader.MaskingKey...) 
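// Not part of the vendored code above: a minimal, self-contained sketch of the
// RFC 6455 client-to-server masking rule that the deleted hybi frame test asserts
// with literal bytes. Each payload byte is XORed with the 4-byte masking key,
// cycling through the key, which is where {0xa4, 0x30, 0xec, 0x4c, 0xa3} comes
// from for the payload "hello" and the key {0xcc, 0x55, 0x80, 0x20}.
package main

import "fmt"

// maskBytes returns a masked copy of payload, XORing each byte with the
// masking key in round-robin fashion.
func maskBytes(key [4]byte, payload []byte) []byte {
	masked := make([]byte, len(payload))
	for i, b := range payload {
		masked[i] = b ^ key[i%4]
	}
	return masked
}

func main() {
	key := [4]byte{0xcc, 0x55, 0x80, 0x20}
	fmt.Printf("% x\n", maskBytes(key, []byte("hello"))) // prints: a4 30 ec 4c a3
}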
- testHybiFrame(t, header, payload, maskedPayload, frameHeader) -} - -func TestHybiShortBinaryFrame(t *testing.T) { - frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame} - payload := []byte("hello") - testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader) - - payload = make([]byte, 125) - testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader) -} - -func TestHybiControlFrame(t *testing.T) { - payload := []byte("hello") - - frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} - testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) - - frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} - testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) - - frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} - testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) - - frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} - testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) - - frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} - payload = []byte{0x03, 0xe8} // 1000 - testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) -} - -func TestHybiLongFrame(t *testing.T) { - frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} - payload := make([]byte, 126) - testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader) - - payload = make([]byte, 65535) - testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader) - - payload = make([]byte, 65536) - testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader) -} - -func TestHybiClientRead(t *testing.T) { - wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', - 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping - 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} - br := bufio.NewReader(bytes.NewBuffer(wireData)) - bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) - conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) - - msg := make([]byte, 512) - n, err := conn.Read(msg) - if err != nil { - t.Errorf("read 1st frame, error %q", err) - } - if n != 5 { - t.Errorf("read 1st frame, expect 5, got %d", n) - } - if !bytes.Equal(wireData[2:7], msg[:n]) { - t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n]) - } - n, err = conn.Read(msg) - if err != nil { - t.Errorf("read 2nd frame, error %q", err) - } - if n != 5 { - t.Errorf("read 2nd frame, expect 5, got %d", n) - } - if !bytes.Equal(wireData[16:21], msg[:n]) { - t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n]) - } - n, err = conn.Read(msg) - if err == nil { - t.Errorf("read not EOF") - } - if n != 0 { - t.Errorf("expect read 0, got %d", n) - } -} - -func TestHybiShortRead(t *testing.T) { - wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', - 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping - 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} - br := bufio.NewReader(bytes.NewBuffer(wireData)) - bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) - conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) - - step := 0 - pos := 0 - expectedPos := []int{2, 5, 16, 19} - expectedLen := []int{3, 2, 3, 2} - for { - msg := make([]byte, 3) - n, err := conn.Read(msg) - if step >= len(expectedPos) { - if err == nil { - t.Errorf("read not EOF") - } - if n != 0 { - t.Errorf("expect read 0, got %d", n) - } - return - } - pos = expectedPos[step] - endPos := pos + expectedLen[step] - if err != nil { - t.Errorf("read from %d, got error %q", pos, err) - return - } - if 
n != endPos-pos { - t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n) - } - if !bytes.Equal(wireData[pos:endPos], msg[:n]) { - t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n]) - } - step++ - } -} - -func TestHybiServerRead(t *testing.T) { - wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, - 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello - 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20, - 0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello - 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24, - 0x9a, 0xec, 0xc6, 0x48, 0x89, // world - } - br := bufio.NewReader(bytes.NewBuffer(wireData)) - bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) - conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) - - expected := [][]byte{[]byte("hello"), []byte("world")} - - msg := make([]byte, 512) - n, err := conn.Read(msg) - if err != nil { - t.Errorf("read 1st frame, error %q", err) - } - if n != 5 { - t.Errorf("read 1st frame, expect 5, got %d", n) - } - if !bytes.Equal(expected[0], msg[:n]) { - t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n]) - } - - n, err = conn.Read(msg) - if err != nil { - t.Errorf("read 2nd frame, error %q", err) - } - if n != 5 { - t.Errorf("read 2nd frame, expect 5, got %d", n) - } - if !bytes.Equal(expected[1], msg[:n]) { - t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n]) - } - - n, err = conn.Read(msg) - if err == nil { - t.Errorf("read not EOF") - } - if n != 0 { - t.Errorf("expect read 0, got %d", n) - } -} - -func TestHybiServerReadWithoutMasking(t *testing.T) { - wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'} - br := bufio.NewReader(bytes.NewBuffer(wireData)) - bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) - conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) - // server MUST close the connection upon receiving a non-masked frame. - msg := make([]byte, 512) - _, err := conn.Read(msg) - if err != io.EOF { - t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) - } -} - -func TestHybiClientReadWithMasking(t *testing.T) { - wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, - 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello - } - br := bufio.NewReader(bytes.NewBuffer(wireData)) - bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) - conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) - - // client MUST close the connection upon receiving a masked frame. 
- msg := make([]byte, 512) - _, err := conn.Read(msg) - if err != io.EOF { - t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) - } -} - -// Test the hybiServerHandshaker supports firefox implementation and -// checks Connection request header include (but it's not necessary -// equal to) "upgrade" -func TestHybiServerFirefoxHandshake(t *testing.T) { - config := new(Config) - handshaker := &hybiServerHandshaker{Config: config} - br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 -Host: server.example.com -Upgrade: websocket -Connection: keep-alive, upgrade -Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== -Origin: http://example.com -Sec-WebSocket-Protocol: chat, superchat -Sec-WebSocket-Version: 13 - -`)) - req, err := http.ReadRequest(br) - if err != nil { - t.Fatal("request", err) - } - code, err := handshaker.ReadHandshake(br, req) - if err != nil { - t.Errorf("handshake failed: %v", err) - } - if code != http.StatusSwitchingProtocols { - t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) - } - b := bytes.NewBuffer([]byte{}) - bw := bufio.NewWriter(b) - - config.Protocol = []string{"chat"} - - err = handshaker.AcceptHandshake(bw) - if err != nil { - t.Errorf("handshake response failed: %v", err) - } - expectedResponse := strings.Join([]string{ - "HTTP/1.1 101 Switching Protocols", - "Upgrade: websocket", - "Connection: Upgrade", - "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", - "Sec-WebSocket-Protocol: chat", - "", ""}, "\r\n") - - if b.String() != expectedResponse { - t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) - } -} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index e242c89a..1f4f7be4 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -241,7 +241,10 @@ func (ws *Conn) Close() error { return err1 } +// IsClientConn reports whether ws is a client-side connection. func (ws *Conn) IsClientConn() bool { return ws.request == nil } + +// IsServerConn reports whether ws is a server-side connection. func (ws *Conn) IsServerConn() bool { return ws.request != nil } // LocalAddr returns the WebSocket Origin for the connection for client, or diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go deleted file mode 100644 index 2054ce85..00000000 --- a/vendor/golang.org/x/net/websocket/websocket_test.go +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
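// Not part of the patch: a minimal sketch of the two accessors documented in the
// vendor/golang.org/x/net/websocket/websocket.go hunk above. On a connection
// obtained inside a websocket.Handler, IsServerConn reports true and the original
// handshake request is available; on a dialed connection, IsClientConn reports
// true. The ":8080" address and "/echo" path are placeholders.
package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

func echo(ws *websocket.Conn) {
	if ws.IsServerConn() {
		// Server-side connections retain the upgrade request.
		log.Printf("serving websocket client %s", ws.Request().RemoteAddr)
	}
	io.Copy(ws, ws) // echo every frame back to the peer
}

func main() {
	http.Handle("/echo", websocket.Handler(echo))
	log.Fatal(http.ListenAndServe(":8080", nil))
}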
- -package websocket - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" - "log" - "net" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -var serverAddr string -var once sync.Once - -func echoServer(ws *Conn) { - defer ws.Close() - io.Copy(ws, ws) -} - -type Count struct { - S string - N int -} - -func countServer(ws *Conn) { - defer ws.Close() - for { - var count Count - err := JSON.Receive(ws, &count) - if err != nil { - return - } - count.N++ - count.S = strings.Repeat(count.S, count.N) - err = JSON.Send(ws, count) - if err != nil { - return - } - } -} - -type testCtrlAndDataHandler struct { - hybiFrameHandler -} - -func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { - h.hybiFrameHandler.conn.wio.Lock() - defer h.hybiFrameHandler.conn.wio.Unlock() - w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) - if err != nil { - return 0, err - } - n, err := w.Write(b) - w.Close() - return n, err -} - -func ctrlAndDataServer(ws *Conn) { - defer ws.Close() - h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} - ws.frameHandler = h - - go func() { - for i := 0; ; i++ { - var b []byte - if i%2 != 0 { // with or without payload - b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) - } - if _, err := h.WritePing(b); err != nil { - break - } - if _, err := h.WritePong(b); err != nil { // unsolicited pong - break - } - time.Sleep(10 * time.Millisecond) - } - }() - - b := make([]byte, 128) - for { - n, err := ws.Read(b) - if err != nil { - break - } - if _, err := ws.Write(b[:n]); err != nil { - break - } - } -} - -func subProtocolHandshake(config *Config, req *http.Request) error { - for _, proto := range config.Protocol { - if proto == "chat" { - config.Protocol = []string{proto} - return nil - } - } - return ErrBadWebSocketProtocol -} - -func subProtoServer(ws *Conn) { - for _, proto := range ws.Config().Protocol { - io.WriteString(ws, proto) - } -} - -func startServer() { - http.Handle("/echo", Handler(echoServer)) - http.Handle("/count", Handler(countServer)) - http.Handle("/ctrldata", Handler(ctrlAndDataServer)) - subproto := Server{ - Handshake: subProtocolHandshake, - Handler: Handler(subProtoServer), - } - http.Handle("/subproto", subproto) - server := httptest.NewServer(nil) - serverAddr = server.Listener.Addr().String() - log.Print("Test WebSocket server listening on ", serverAddr) -} - -func newConfig(t *testing.T, path string) *Config { - config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost") - return config -} - -func TestEcho(t *testing.T) { - once.Do(startServer) - - // websocket.Dial() - client, err := net.Dial("tcp", serverAddr) - if err != nil { - t.Fatal("dialing", err) - } - conn, err := NewClient(newConfig(t, "/echo"), client) - if err != nil { - t.Errorf("WebSocket handshake error: %v", err) - return - } - - msg := []byte("hello, world\n") - if _, err := conn.Write(msg); err != nil { - t.Errorf("Write: %v", err) - } - var actual_msg = make([]byte, 512) - n, err := conn.Read(actual_msg) - if err != nil { - t.Errorf("Read: %v", err) - } - actual_msg = actual_msg[0:n] - if !bytes.Equal(msg, actual_msg) { - t.Errorf("Echo: expected %q got %q", msg, actual_msg) - } - conn.Close() -} - -func TestAddr(t *testing.T) { - once.Do(startServer) - - // websocket.Dial() - client, err := net.Dial("tcp", serverAddr) - if err != nil { - t.Fatal("dialing", err) - } - conn, err := NewClient(newConfig(t, "/echo"), client) - if 
err != nil { - t.Errorf("WebSocket handshake error: %v", err) - return - } - - ra := conn.RemoteAddr().String() - if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") { - t.Errorf("Bad remote addr: %v", ra) - } - la := conn.LocalAddr().String() - if !strings.HasPrefix(la, "http://") { - t.Errorf("Bad local addr: %v", la) - } - conn.Close() -} - -func TestCount(t *testing.T) { - once.Do(startServer) - - // websocket.Dial() - client, err := net.Dial("tcp", serverAddr) - if err != nil { - t.Fatal("dialing", err) - } - conn, err := NewClient(newConfig(t, "/count"), client) - if err != nil { - t.Errorf("WebSocket handshake error: %v", err) - return - } - - var count Count - count.S = "hello" - if err := JSON.Send(conn, count); err != nil { - t.Errorf("Write: %v", err) - } - if err := JSON.Receive(conn, &count); err != nil { - t.Errorf("Read: %v", err) - } - if count.N != 1 { - t.Errorf("count: expected %d got %d", 1, count.N) - } - if count.S != "hello" { - t.Errorf("count: expected %q got %q", "hello", count.S) - } - if err := JSON.Send(conn, count); err != nil { - t.Errorf("Write: %v", err) - } - if err := JSON.Receive(conn, &count); err != nil { - t.Errorf("Read: %v", err) - } - if count.N != 2 { - t.Errorf("count: expected %d got %d", 2, count.N) - } - if count.S != "hellohello" { - t.Errorf("count: expected %q got %q", "hellohello", count.S) - } - conn.Close() -} - -func TestWithQuery(t *testing.T) { - once.Do(startServer) - - client, err := net.Dial("tcp", serverAddr) - if err != nil { - t.Fatal("dialing", err) - } - - config := newConfig(t, "/echo") - config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr)) - if err != nil { - t.Fatal("location url", err) - } - - ws, err := NewClient(config, client) - if err != nil { - t.Errorf("WebSocket handshake: %v", err) - return - } - ws.Close() -} - -func testWithProtocol(t *testing.T, subproto []string) (string, error) { - once.Do(startServer) - - client, err := net.Dial("tcp", serverAddr) - if err != nil { - t.Fatal("dialing", err) - } - - config := newConfig(t, "/subproto") - config.Protocol = subproto - - ws, err := NewClient(config, client) - if err != nil { - return "", err - } - msg := make([]byte, 16) - n, err := ws.Read(msg) - if err != nil { - return "", err - } - ws.Close() - return string(msg[:n]), nil -} - -func TestWithProtocol(t *testing.T) { - proto, err := testWithProtocol(t, []string{"chat"}) - if err != nil { - t.Errorf("SubProto: unexpected error: %v", err) - } - if proto != "chat" { - t.Errorf("SubProto: expected %q, got %q", "chat", proto) - } -} - -func TestWithTwoProtocol(t *testing.T) { - proto, err := testWithProtocol(t, []string{"test", "chat"}) - if err != nil { - t.Errorf("SubProto: unexpected error: %v", err) - } - if proto != "chat" { - t.Errorf("SubProto: expected %q, got %q", "chat", proto) - } -} - -func TestWithBadProtocol(t *testing.T) { - _, err := testWithProtocol(t, []string{"test"}) - if err != ErrBadStatus { - t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err) - } -} - -func TestHTTP(t *testing.T) { - once.Do(startServer) - - // If the client did not send a handshake that matches the protocol - // specification, the server MUST return an HTTP response with an - // appropriate error code (such as 400 Bad Request) - resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr)) - if err != nil { - t.Errorf("Get: error %#v", err) - return - } - if resp == nil { - t.Error("Get: resp is null") - return - } - if resp.StatusCode != http.StatusBadRequest { 
- t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode) - } -} - -func TestTrailingSpaces(t *testing.T) { - // http://code.google.com/p/go/issues/detail?id=955 - // The last runs of this create keys with trailing spaces that should not be - // generated by the client. - once.Do(startServer) - config := newConfig(t, "/echo") - for i := 0; i < 30; i++ { - // body - ws, err := DialConfig(config) - if err != nil { - t.Errorf("Dial #%d failed: %v", i, err) - break - } - ws.Close() - } -} - -func TestDialConfigBadVersion(t *testing.T) { - once.Do(startServer) - config := newConfig(t, "/echo") - config.Version = 1234 - - _, err := DialConfig(config) - - if dialerr, ok := err.(*DialError); ok { - if dialerr.Err != ErrBadProtocolVersion { - t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err) - } - } -} - -func TestDialConfigWithDialer(t *testing.T) { - once.Do(startServer) - config := newConfig(t, "/echo") - config.Dialer = &net.Dialer{ - Deadline: time.Now().Add(-time.Minute), - } - _, err := DialConfig(config) - dialerr, ok := err.(*DialError) - if !ok { - t.Fatalf("DialError expected, got %#v", err) - } - neterr, ok := dialerr.Err.(*net.OpError) - if !ok { - t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) - } - if !neterr.Timeout() { - t.Fatalf("expected timeout error, got %#v", neterr) - } -} - -func TestSmallBuffer(t *testing.T) { - // http://code.google.com/p/go/issues/detail?id=1145 - // Read should be able to handle reading a fragment of a frame. - once.Do(startServer) - - // websocket.Dial() - client, err := net.Dial("tcp", serverAddr) - if err != nil { - t.Fatal("dialing", err) - } - conn, err := NewClient(newConfig(t, "/echo"), client) - if err != nil { - t.Errorf("WebSocket handshake error: %v", err) - return - } - - msg := []byte("hello, world\n") - if _, err := conn.Write(msg); err != nil { - t.Errorf("Write: %v", err) - } - var small_msg = make([]byte, 8) - n, err := conn.Read(small_msg) - if err != nil { - t.Errorf("Read: %v", err) - } - if !bytes.Equal(msg[:len(small_msg)], small_msg) { - t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg) - } - var second_msg = make([]byte, len(msg)) - n, err = conn.Read(second_msg) - if err != nil { - t.Errorf("Read: %v", err) - } - second_msg = second_msg[0:n] - if !bytes.Equal(msg[len(small_msg):], second_msg) { - t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg) - } - conn.Close() -} - -var parseAuthorityTests = []struct { - in *url.URL - out string -}{ - { - &url.URL{ - Scheme: "ws", - Host: "www.google.com", - }, - "www.google.com:80", - }, - { - &url.URL{ - Scheme: "wss", - Host: "www.google.com", - }, - "www.google.com:443", - }, - { - &url.URL{ - Scheme: "ws", - Host: "www.google.com:80", - }, - "www.google.com:80", - }, - { - &url.URL{ - Scheme: "wss", - Host: "www.google.com:443", - }, - "www.google.com:443", - }, - // some invalid ones for parseAuthority. 
-	// concern itself with the scheme unless it actually knows about it
-	{
-		&url.URL{
-			Scheme: "http",
-			Host:   "www.google.com",
-		},
-		"www.google.com",
-	},
-	{
-		&url.URL{
-			Scheme: "http",
-			Host:   "www.google.com:80",
-		},
-		"www.google.com:80",
-	},
-	{
-		&url.URL{
-			Scheme: "asdf",
-			Host:   "127.0.0.1",
-		},
-		"127.0.0.1",
-	},
-	{
-		&url.URL{
-			Scheme: "asdf",
-			Host:   "www.google.com",
-		},
-		"www.google.com",
-	},
-}
-
-func TestParseAuthority(t *testing.T) {
-	for _, tt := range parseAuthorityTests {
-		out := parseAuthority(tt.in)
-		if out != tt.out {
-			t.Errorf("got %v; want %v", out, tt.out)
-		}
-	}
-}
-
-type closerConn struct {
-	net.Conn
-	closed int // count of the number of times Close was called
-}
-
-func (c *closerConn) Close() error {
-	c.closed++
-	return c.Conn.Close()
-}
-
-func TestClose(t *testing.T) {
-	if runtime.GOOS == "plan9" {
-		t.Skip("see golang.org/issue/11454")
-	}
-
-	once.Do(startServer)
-
-	conn, err := net.Dial("tcp", serverAddr)
-	if err != nil {
-		t.Fatal("dialing", err)
-	}
-
-	cc := closerConn{Conn: conn}
-
-	client, err := NewClient(newConfig(t, "/echo"), &cc)
-	if err != nil {
-		t.Fatalf("WebSocket handshake: %v", err)
-	}
-
-	// set the deadline to ten minutes ago, which will have expired by the time
-	// client.Close sends the close status frame.
-	conn.SetDeadline(time.Now().Add(-10 * time.Minute))
-
-	if err := client.Close(); err == nil {
-		t.Errorf("ws.Close(): expected error, got %v", err)
-	}
-	if cc.closed < 1 {
-		t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed)
-	}
-}
-
-var originTests = []struct {
-	req    *http.Request
-	origin *url.URL
-}{
-	{
-		req: &http.Request{
-			Header: http.Header{
-				"Origin": []string{"http://www.example.com"},
-			},
-		},
-		origin: &url.URL{
-			Scheme: "http",
-			Host:   "www.example.com",
-		},
-	},
-	{
-		req: &http.Request{},
-	},
-}
-
-func TestOrigin(t *testing.T) {
-	conf := newConfig(t, "/echo")
-	conf.Version = ProtocolVersionHybi13
-	for i, tt := range originTests {
-		origin, err := Origin(conf, tt.req)
-		if err != nil {
-			t.Error(err)
-			continue
-		}
-		if !reflect.DeepEqual(origin, tt.origin) {
-			t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin)
-			continue
-		}
-	}
-}
-
-func TestCtrlAndData(t *testing.T) {
-	once.Do(startServer)
-
-	c, err := net.Dial("tcp", serverAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ws, err := NewClient(newConfig(t, "/ctrldata"), c)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer ws.Close()
-
-	h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}}
-	ws.frameHandler = h
-
-	b := make([]byte, 128)
-	for i := 0; i < 2; i++ {
-		data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i))
-		if _, err := ws.Write(data); err != nil {
-			t.Fatalf("#%d: %v", i, err)
-		}
-		var ctrl []byte
-		if i%2 != 0 { // with or without payload
-			ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i))
-		}
-		if _, err := h.WritePing(ctrl); err != nil {
-			t.Fatalf("#%d: %v", i, err)
-		}
-		n, err := ws.Read(b)
-		if err != nil {
-			t.Fatalf("#%d: %v", i, err)
-		}
-		if !bytes.Equal(b[:n], data) {
-			t.Fatalf("#%d: got %v; want %v", i, b[:n], data)
-		}
-	}
-}
-
-func TestCodec_ReceiveLimited(t *testing.T) {
-	const limit = 2048
-	var payloads [][]byte
-	for _, size := range []int{
-		1024,
-		2048,
-		4096, // receive of this message would be interrupted due to limit
-		2048, // this one is to make sure next receive recovers discarding leftovers
-	} {
-		b := make([]byte, size)
-		rand.Read(b)
-		payloads = append(payloads, b)
-	}
-	handlerDone := make(chan struct{})
-	limitedHandler := func(ws *Conn) {
-		defer close(handlerDone)
-		ws.MaxPayloadBytes = limit
-		defer ws.Close()
-		for i, p := range payloads {
-			t.Logf("payload #%d (size %d, exceeds limit: %v)", i, len(p), len(p) > limit)
-			var recv []byte
-			err := Message.Receive(ws, &recv)
-			switch err {
-			case nil:
-			case ErrFrameTooLarge:
-				if len(p) <= limit {
-					t.Fatalf("unexpected frame size limit: expected %d bytes of payload having limit at %d", len(p), limit)
-				}
-				continue
-			default:
-				t.Fatalf("unexpected error: %v (want either nil or ErrFrameTooLarge)", err)
-			}
-			if len(recv) > limit {
-				t.Fatalf("received %d bytes of payload having limit at %d", len(recv), limit)
-			}
-			if !bytes.Equal(p, recv) {
-				t.Fatalf("received payload differs:\ngot:\t%v\nwant:\t%v", recv, p)
-			}
-		}
-	}
-	server := httptest.NewServer(Handler(limitedHandler))
-	defer server.CloseClientConnections()
-	defer server.Close()
-	addr := server.Listener.Addr().String()
-	ws, err := Dial("ws://"+addr+"/", "", "http://localhost/")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer ws.Close()
-	for i, p := range payloads {
-		if err := Message.Send(ws, p); err != nil {
-			t.Fatalf("payload #%d (size %d): %v", i, len(p), err)
-		}
-	}
-	<-handlerDone
-}
diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go b/vendor/golang.org/x/net/xsrftoken/xsrf.go
deleted file mode 100644
index bc861e1f..00000000
--- a/vendor/golang.org/x/net/xsrftoken/xsrf.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package xsrftoken provides methods for generating and validating secure XSRF tokens.
-package xsrftoken // import "golang.org/x/net/xsrftoken"
-
-import (
-	"crypto/hmac"
-	"crypto/sha1"
-	"crypto/subtle"
-	"encoding/base64"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Timeout is the duration for which XSRF tokens are valid.
-// It is exported so clients may set cookie timeouts that match generated tokens.
-const Timeout = 24 * time.Hour
-
-// clean sanitizes a string for inclusion in a token by replacing all ":"s.
-func clean(s string) string {
-	return strings.Replace(s, ":", "_", -1)
-}
-
-// Generate returns a URL-safe secure XSRF token that expires in 24 hours.
-//
-// key is a secret key for your application; it must be non-empty.
-// userID is an optional unique identifier for the user.
-// actionID is an optional action the user is taking (e.g. POSTing to a particular path).
-func Generate(key, userID, actionID string) string {
-	return generateTokenAtTime(key, userID, actionID, time.Now())
-}
-
-// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now.
-func generateTokenAtTime(key, userID, actionID string, now time.Time) string {
-	if len(key) == 0 {
-		panic("zero length xsrf secret key")
-	}
-	// Round time up and convert to milliseconds.
-	milliTime := (now.UnixNano() + 1e6 - 1) / 1e6
-
-	h := hmac.New(sha1.New, []byte(key))
-	fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime)
-
-	// Get the padded base64 string then removing the padding.
-	tok := string(h.Sum(nil))
-	tok = base64.URLEncoding.EncodeToString([]byte(tok))
-	tok = strings.TrimRight(tok, "=")
-
-	return fmt.Sprintf("%s:%d", tok, milliTime)
-}
-
-// Valid reports whether a token is a valid, unexpired token returned by Generate.
-func Valid(token, key, userID, actionID string) bool {
-	return validTokenAtTime(token, key, userID, actionID, time.Now())
-}
-
-// validTokenAtTime reports whether a token is valid at the given time.
-func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool {
-	if len(key) == 0 {
-		panic("zero length xsrf secret key")
-	}
-	// Extract the issue time of the token.
-	sep := strings.LastIndex(token, ":")
-	if sep < 0 {
-		return false
-	}
-	millis, err := strconv.ParseInt(token[sep+1:], 10, 64)
-	if err != nil {
-		return false
-	}
-	issueTime := time.Unix(0, millis*1e6)
-
-	// Check that the token is not expired.
-	if now.Sub(issueTime) >= Timeout {
-		return false
-	}
-
-	// Check that the token is not from the future.
-	// Allow 1 minute grace period in case the token is being verified on a
-	// machine whose clock is behind the machine that issued the token.
-	if issueTime.After(now.Add(1 * time.Minute)) {
-		return false
-	}
-
-	expected := generateTokenAtTime(key, userID, actionID, issueTime)
-
-	// Check that the token matches the expected value.
-	// Use constant time comparison to avoid timing attacks.
-	return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1
-}
diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go
deleted file mode 100644
index 6c8e7d9b..00000000
--- a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xsrftoken
-
-import (
-	"encoding/base64"
-	"testing"
-	"time"
-)
-
-const (
-	key      = "quay"
-	userID   = "12345678"
-	actionID = "POST /form"
-)
-
-var (
-	now              = time.Now()
-	oneMinuteFromNow = now.Add(1 * time.Minute)
-)
-
-func TestValidToken(t *testing.T) {
-	tok := generateTokenAtTime(key, userID, actionID, now)
-	if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) {
-		t.Error("One second later: Expected token to be valid")
-	}
-	if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) {
-		t.Error("Just before timeout: Expected token to be valid")
-	}
-	if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) {
-		t.Error("One minute in the past: Expected token to be valid")
-	}
-}
-
-// TestSeparatorReplacement tests that separators are being correctly substituted
-func TestSeparatorReplacement(t *testing.T) {
-	tok := generateTokenAtTime("foo:bar", "baz", "wah", now)
-	tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now)
-	if tok == tok2 {
-		t.Errorf("Expected generated tokens to be different")
-	}
-}
-
-func TestInvalidToken(t *testing.T) {
-	invalidTokenTests := []struct {
-		name, key, userID, actionID string
-		t                           time.Time
-	}{
-		{"Bad key", "foobar", userID, actionID, oneMinuteFromNow},
-		{"Bad userID", key, "foobar", actionID, oneMinuteFromNow},
-		{"Bad actionID", key, userID, "foobar", oneMinuteFromNow},
-		{"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)},
-		{"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)},
-	}
-
-	tok := generateTokenAtTime(key, userID, actionID, now)
-	for _, itt := range invalidTokenTests {
-		if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) {
-			t.Errorf("%v: Expected token to be invalid", itt.name)
-		}
-	}
-}
-
-// TestValidateBadData primarily tests that no unexpected panics are triggered
-// during parsing
-func TestValidateBadData(t *testing.T) {
-	badDataTests := []struct {
-		name, tok string
-	}{
-		{"Invalid Base64", "ASDab24(@)$*=="},
-		{"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))},
-		{"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))},
-		{"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)},
-	}
-
-	for _, bdt := range badDataTests {
-		if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) {
-			t.Errorf("%v: Expected token to be invalid", bdt.name)
-		}
-	}
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
new file mode 100644
index 00000000..5a92077b
--- /dev/null
+++ b/vendor/modules.txt
@@ -0,0 +1,68 @@
+# github.com/ancientlore/go-avltree v0.0.0-20180928192319-da83409fdfb7
+github.com/ancientlore/go-avltree
+# github.com/codegangsta/negroni v0.2.0
+github.com/codegangsta/negroni
+# github.com/dgrijalva/jwt-go v3.0.0+incompatible
+github.com/dgrijalva/jwt-go
+# github.com/eclipse/paho.mqtt.golang v1.2.0
+github.com/eclipse/paho.mqtt.golang
+github.com/eclipse/paho.mqtt.golang/packets
+# github.com/farshidtz/elog v1.0.1
+github.com/farshidtz/elog
+# github.com/farshidtz/mqtt-match v1.0.1
+github.com/farshidtz/mqtt-match
+# github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db
+github.com/golang/snappy
+# github.com/gorilla/context v1.1.1
+github.com/gorilla/context
+# github.com/gorilla/mux v1.5.0
+github.com/gorilla/mux
+# github.com/justinas/alice v0.0.0-20160512134231-052b8b6c18ed
+github.com/justinas/alice
+# github.com/linksmart/go-sec v1.0.1
+github.com/linksmart/go-sec/auth/keycloak/obtainer
+github.com/linksmart/go-sec/auth/keycloak/validator
+github.com/linksmart/go-sec/auth/obtainer
+github.com/linksmart/go-sec/auth/validator
+github.com/linksmart/go-sec/authz
+# github.com/linksmart/service-catalog v2.3.4+incompatible
+github.com/linksmart/service-catalog/discovery
+# github.com/linksmart/service-catalog/v2 v2.4.1
+github.com/linksmart/service-catalog/v2/catalog
+github.com/linksmart/service-catalog/v2/utils
+# github.com/miekg/dns v0.0.0-20170818131442-e4205768578d
+github.com/miekg/dns
+# github.com/oleksandr/bonjour v0.0.0-20160508152359-5dcf00d8b228
+github.com/oleksandr/bonjour
+# github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c
+github.com/pborman/uuid
+# github.com/satori/go.uuid v1.2.0
+github.com/satori/go.uuid
+# github.com/syndtr/goleveldb v1.0.0
+github.com/syndtr/goleveldb/leveldb
+github.com/syndtr/goleveldb/leveldb/cache
+github.com/syndtr/goleveldb/leveldb/comparer
+github.com/syndtr/goleveldb/leveldb/errors
+github.com/syndtr/goleveldb/leveldb/filter
+github.com/syndtr/goleveldb/leveldb/iterator
+github.com/syndtr/goleveldb/leveldb/journal
+github.com/syndtr/goleveldb/leveldb/memdb
+github.com/syndtr/goleveldb/leveldb/opt
+github.com/syndtr/goleveldb/leveldb/storage
+github.com/syndtr/goleveldb/leveldb/table
+github.com/syndtr/goleveldb/leveldb/util
+# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f
+github.com/xeipuuv/gojsonpointer
+# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
+github.com/xeipuuv/gojsonreference
+# github.com/xeipuuv/gojsonschema v1.2.0
+github.com/xeipuuv/gojsonschema
+# golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
+golang.org/x/net/bpf
+golang.org/x/net/internal/iana
+golang.org/x/net/internal/socket
+golang.org/x/net/internal/socks
+golang.org/x/net/ipv4
+golang.org/x/net/ipv6
+golang.org/x/net/proxy
+golang.org/x/net/websocket