From f5129a60fe70ac53b68ae0f661c769e40a0ffdc2 Mon Sep 17 00:00:00 2001 From: Dishon Date: Wed, 10 Feb 2021 19:47:12 +0000 Subject: [PATCH 1/5] The first commit --- .gitignore | 14 + .gitlab-ci.yml | 33 + .gitlab/ci/prepare.yml | 52 ++ .gitlab/ci/test.yml | 88 +++ .gitlab/issue_templates/release.md | 64 ++ .../Security Release.md | 33 + .golangci.yml | 63 ++ .tool-versions | 1 + CHANGELOG | 336 ++++++++ CONTRIBUTING.md | 613 +++++++++++++++ LICENSE | 19 + Makefile | 34 + Makefile.build.mk | 26 + Makefile.internal.mk | 48 ++ Makefile.util.mk | 61 ++ PROCESS.md | 80 ++ README.md | 331 ++++++++ VERSION | 1 + app.go | 553 +++++++++++++ app_config.go | 71 ++ app_test.go | 115 +++ config_test.go | 54 ++ daemon.go | 358 +++++++++ doc/dependency_decisions.yml | 133 ++++ doc/development.md | 81 ++ go.mod | 42 + go.sum | 553 +++++++++++++ helpers.go | 46 ++ internal/acme/acme.go | 62 ++ internal/acme/acme_test.go | 58 ++ internal/artifact/artifact.go | 185 +++++ internal/artifact/artifact_test.go | 277 +++++++ internal/auth/auth.go | 672 ++++++++++++++++ internal/auth/auth_code.go | 147 ++++ internal/auth/auth_code_test.go | 99 +++ internal/auth/auth_test.go | 468 +++++++++++ internal/config/config.go | 18 + internal/domain/domain.go | 215 ++++++ internal/domain/domain_test.go | 219 ++++++ internal/domain/resolver.go | 14 + internal/fixture/fixtures.go | 60 ++ internal/handlers/handlers.go | 68 ++ internal/handlers/handlers_test.go | 161 ++++ internal/host/host.go | 23 + internal/host/host_test.go | 18 + internal/httperrors/httperrors.go | 202 +++++ internal/httperrors/httperrors_test.go | 117 +++ internal/httprange/http_ranged_reader.go | 63 ++ internal/httprange/http_ranged_reader_test.go | 293 +++++++ internal/httprange/http_reader.go | 215 ++++++ internal/httprange/http_reader_test.go | 341 ++++++++ internal/httprange/resource.go | 129 ++++ internal/httprange/resource_test.go | 108 +++ internal/httptransport/LICENSE | 27 + internal/httptransport/trace.go | 77 ++ internal/httptransport/transport.go | 142 ++++ internal/httptransport/transport_darwin.go | 118 +++ internal/httptransport/transport_test.go | 135 ++++ internal/httputil/LICENSE | 27 + internal/httputil/README.md | 5 + internal/httputil/header/header.go | 298 +++++++ internal/httputil/negotiate.go | 80 ++ internal/interface.go | 18 + internal/jail/jail.go | 246 ++++++ internal/jail/jail_test.go | 298 +++++++ internal/jail/mount_linux.go | 55 ++ internal/jail/mount_not_supported.go | 27 + internal/logging/logging.go | 109 +++ internal/logging/logging_test.go | 102 +++ internal/middleware/headers.go | 31 + internal/middleware/headers_test.go | 126 +++ internal/mocks/mocks.go | 129 ++++ internal/netutil/shared_limit_listener.go | 113 +++ internal/redirects/redirects.go | 202 +++++ .../redirects/redirects_benchmark_test.go | 69 ++ internal/redirects/redirects_test.go | 296 +++++++ internal/rejectmethods/middleware.go | 31 + internal/rejectmethods/middleware_test.go | 43 ++ internal/request/request.go | 57 ++ internal/request/request_test.go | 89 +++ internal/serving/disk/errors.go | 18 + internal/serving/disk/helpers.go | 101 +++ internal/serving/disk/local/serving.go | 16 + internal/serving/disk/local/serving_test.go | 98 +++ internal/serving/disk/reader.go | 274 +++++++ internal/serving/disk/reader_test.go | 68 ++ internal/serving/disk/serving.go | 58 ++ internal/serving/disk/symlink/LICENSE | 27 + internal/serving/disk/symlink/PATENTS | 22 + internal/serving/disk/symlink/README.md | 7 + internal/serving/disk/symlink/path_test.go | 195 +++++ internal/serving/disk/symlink/shims.go | 17 + internal/serving/disk/symlink/symlink.go | 153 ++++ internal/serving/disk/zip/serving.go | 17 + internal/serving/disk/zip/serving_test.go | 128 +++ internal/serving/handler.go | 12 + internal/serving/lookup_path.go | 12 + internal/serving/request.go | 35 + internal/serving/serverless/certs.go | 26 + internal/serving/serverless/cluster.go | 28 + internal/serving/serverless/director.go | 20 + internal/serving/serverless/errors.go | 26 + internal/serving/serverless/serverless.go | 73 ++ .../serving/serverless/serverless_test.go | 165 ++++ internal/serving/serverless/transport.go | 51 ++ internal/serving/serving.go | 10 + internal/source/config.go | 7 + internal/source/disk/config.go | 57 ++ internal/source/disk/config_test.go | 65 ++ internal/source/disk/custom.go | 37 + internal/source/disk/disk.go | 55 ++ internal/source/disk/domain_test.go | 507 ++++++++++++ internal/source/disk/group.go | 104 +++ internal/source/disk/group_test.go | 97 +++ internal/source/disk/map.go | 307 ++++++++ internal/source/disk/map_test.go | 253 ++++++ internal/source/domains.go | 163 ++++ internal/source/domains_test.go | 195 +++++ internal/source/gitlab/api/client.go | 14 + internal/source/gitlab/api/lookup.go | 8 + internal/source/gitlab/api/lookup_path.go | 32 + internal/source/gitlab/api/resolver.go | 15 + internal/source/gitlab/api/virtual_domain.go | 10 + internal/source/gitlab/cache/cache.go | 116 +++ internal/source/gitlab/cache/cache_test.go | 229 ++++++ internal/source/gitlab/cache/entry.go | 109 +++ internal/source/gitlab/cache/entry_test.go | 64 ++ internal/source/gitlab/cache/memstore.go | 61 ++ internal/source/gitlab/cache/retriever.go | 116 +++ .../source/gitlab/cache/retriever_test.go | 27 + internal/source/gitlab/cache/store.go | 7 + internal/source/gitlab/client/client.go | 209 +++++ internal/source/gitlab/client/client_stub.go | 42 + internal/source/gitlab/client/client_test.go | 354 +++++++++ internal/source/gitlab/client/config.go | 13 + .../client/testdata/test.gitlab.io.json | 36 + internal/source/gitlab/factory.go | 57 ++ internal/source/gitlab/factory_test.go | 65 ++ internal/source/gitlab/gitlab.go | 102 +++ internal/source/gitlab/gitlab_poll.go | 40 + internal/source/gitlab/gitlab_poll_test.go | 80 ++ internal/source/gitlab/gitlab_test.go | 100 +++ internal/source/source.go | 9 + internal/source/source_mock.go | 36 + internal/testhelpers/chdir.go | 29 + internal/testhelpers/testhelpers.go | 57 ++ internal/testhelpers/tmpdir.go | 38 + internal/tlsconfig/tlsconfig.go | 102 +++ internal/tlsconfig/tlsconfig_test.go | 71 ++ internal/validateargs/validateargs.go | 41 + internal/validateargs/validateargs_test.go | 50 ++ internal/vfs/errors.go | 18 + internal/vfs/file.go | 15 + internal/vfs/local/root.go | 107 +++ internal/vfs/local/root_test.go | 296 +++++++ internal/vfs/local/testdata/file | 1 + internal/vfs/local/testdata/link | 1 + internal/vfs/local/vfs.go | 51 ++ internal/vfs/local/vfs_test.go | 117 +++ internal/vfs/root.go | 69 ++ internal/vfs/vfs.go | 58 ++ internal/vfs/zip/archive.go | 308 ++++++++ internal/vfs/zip/archive_test.go | 474 ++++++++++++ internal/vfs/zip/deflate_reader.go | 66 ++ internal/vfs/zip/lru_cache.go | 62 ++ internal/vfs/zip/vfs.go | 214 +++++ internal/vfs/zip/vfs_test.go | 226 ++++++ main.go | 451 +++++++++++ metrics/metrics.go | 240 ++++++ multi_string_flag.go | 37 + multi_string_flag_test.go | 49 ++ server.go | 76 ++ shared/invalid-pages/.update/.gitkeep | 0 shared/lookups/new-source-test.gitlab.io.json | 16 + shared/lookups/zip-malformed.gitlab.io.json | 16 + shared/lookups/zip-not-found.gitlab.io.json | 16 + shared/lookups/zip.gitlab.io.json | 16 + shared/pages/.hidden.group/project/.gitkeep | 0 .../@hashed/hashed.gitlab.io/config.json | 7 + .../hashed.gitlab.io/public/index.html | 1 + .../CapitalProject/public/index.html | 1 + .../CapitalGroup/project/public/index.html | 1 + shared/pages/README.md | 1 + shared/pages/group.404/domain.404/config.json | 7 + .../group.404/domain.404/public/404.html | 1 + .../public/404.html | 1 + .../group.404.test.io/public/404.html | 1 + .../group.404/private_project/config.json | 5 + .../group.404/private_project/public/404.html | 1 + .../private_unauthorized/config.json | 5 + .../private_unauthorized/public/404.html | 1 + .../project.404.symlink/public/404.html | 1 + .../group.404/project.404/public/404.html | 1 + .../project.no.404/public/index.html | 1 + .../with.acme.challenge/config.json | 6 + .../.well-known/acme-challenge/existingtoken | 1 + .../acme-challenge/foldertoken/index.html | 1 + .../with.acme.challenge/public/index.html | 1 + .../group.auth.gitlab-example.com/config.json | 1 + .../public/404.html | 1 + .../public/index.html | 1 + .../public/private.project/index.html | 1 + .../group.auth/private.project.1/config.json | 1 + .../private.project.1/public/404.html | 1 + .../private.project.1/public/index.html | 1 + .../group.auth/private.project.2/config.json | 1 + .../private.project.2/public/index.html | 1 + .../group.auth/private.project/config.json | 10 + .../private.project/public/index.html | 1 + .../subgroup/private.project.1/config.json | 1 + .../private.project.1/public/index.html | 1 + .../subgroup/private.project.2/config.json | 1 + .../private.project.2/public/index.html | 1 + .../subgroup/private.project/config.json | 1 + .../private.project/public/index.html | 1 + shared/pages/group.deleted/is_file.txt | 0 .../project.deleted/public/.gitkeep | 0 .../.hidden.project/public/.gitkeep | 0 .../group.https-only/project1/config.json | 1 + .../project1/public/index.html | 1 + .../group.https-only/project2/config.json | 1 + .../project2/public/index.html | 0 .../group.https-only/project3/config.json | 8 + .../project3/public/index.html | 0 .../group.https-only/project4/config.json | 8 + .../project4/public/index.html | 0 .../group.https-only/project5/config.json | 11 + .../project5/public/index.html | 0 .../project.internal/public/.gitkeep | 0 shared/pages/group.no.projects/.gitkeep | 0 shared/pages/group.no.public/project/.gitkeep | 0 .../group.redirects/custom-domain/config.json | 7 + .../custom-domain/public/_redirects | 11 + .../public/_redirects | 11 + .../project-redirects/public/_redirects | 11 + .../public/file-override.html | 1 + .../project-redirects/public/index.html | 1 + .../project-redirects/public/magic-land.html | 1 + .../group/CapitalProject/public/index.html | 1 + .../public/index.html | 1 + .../public/index.html.br | Bin 0 -> 8 bytes .../public/index.html.gz | Bin 0 -> 34 bytes .../public/project/index.html | 1 + shared/pages/group/group.test.io/.gitkeep | 0 shared/pages/group/group.test.io/config.json | 15 + .../pages/group/group.test.io/public/.gitkeep | 0 .../group/group.test.io/public/gz-symlink | 1 + .../group/group.test.io/public/gz-symlink.br | 1 + .../group/group.test.io/public/gz-symlink.gz | 1 + .../group.test.io/public/image-nogzip.unknown | Bin 0 -> 14 bytes .../group/group.test.io/public/image.unknown | Bin 0 -> 14 bytes .../group.test.io/public/image.unknown.br | Bin 0 -> 19 bytes .../group.test.io/public/image.unknown.gz | Bin 0 -> 46 bytes .../group/group.test.io/public/index.html | 1 + .../group/group.test.io/public/index.html.br | Bin 0 -> 14 bytes .../group/group.test.io/public/index.html.gz | Bin 0 -> 40 bytes .../group/group.test.io/public/index2.html | 1 + .../group/group.test.io/public/index2.html.gz | Bin 0 -> 41 bytes .../group.test.io/public/project2/index.html | 1 + .../group.test.io/public/text-nogzip.unknown | 1 + .../group/group.test.io/public/text.unknown | 1 + .../group.test.io/public/text.unknown.br | Bin 0 -> 10 bytes .../group.test.io/public/text.unknown.gz | Bin 0 -> 37 bytes .../public/index.html | 1 + .../group/project/public/file.webmanifest | 0 shared/pages/group/project/public/index.html | 1 + .../group/project/public/subdir/index.html | 1 + shared/pages/group/project2/public/index.html | 1 + .../group/project2/public/subdir/index.html | 1 + shared/pages/group/serving/public/index.html | 6 + .../group/subgroup/project/public/index.html | 1 + .../subgroup/project/public/subdir/index.html | 1 + .../zip.gitlab.io/public-without-dirs.zip | Bin 0 -> 2117 bytes shared/pages/group/zip.gitlab.io/public.zip | Bin 0 -> 2415 bytes shared/pages/is_file | 0 test/acceptance/acceptance_test.go | 81 ++ test/acceptance/acme_test.go | 73 ++ test/acceptance/artifacts_test.go | 299 +++++++ test/acceptance/auth_test.go | 730 ++++++++++++++++++ test/acceptance/config_test.go | 66 ++ test/acceptance/encodings_test.go | 78 ++ test/acceptance/helpers_test.go | 631 +++++++++++++++ test/acceptance/metrics_test.go | 62 ++ test/acceptance/proxyv2_test.go | 57 ++ test/acceptance/redirects_test.go | 116 +++ test/acceptance/serving_test.go | 574 ++++++++++++++ test/acceptance/status_test.go | 44 ++ test/acceptance/stub_test.go | 72 ++ test/acceptance/tls_test.go | 130 ++++ test/acceptance/unknown_http_method_test.go | 23 + test/acceptance/zip_test.go | 161 ++++ tools.go | 11 + 292 files changed, 23319 insertions(+) create mode 100644 .gitignore create mode 100644 .gitlab-ci.yml create mode 100644 .gitlab/ci/prepare.yml create mode 100644 .gitlab/ci/test.yml create mode 100644 .gitlab/issue_templates/release.md create mode 100644 .gitlab/merge_request_templates/Security Release.md create mode 100644 .golangci.yml create mode 100644 .tool-versions create mode 100644 CHANGELOG create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 Makefile.build.mk create mode 100644 Makefile.internal.mk create mode 100644 Makefile.util.mk create mode 100644 PROCESS.md create mode 100644 README.md create mode 100644 VERSION create mode 100644 app.go create mode 100644 app_config.go create mode 100644 app_test.go create mode 100644 config_test.go create mode 100644 daemon.go create mode 100644 doc/dependency_decisions.yml create mode 100644 doc/development.md create mode 100644 go.mod create mode 100644 go.sum create mode 100644 helpers.go create mode 100644 internal/acme/acme.go create mode 100644 internal/acme/acme_test.go create mode 100644 internal/artifact/artifact.go create mode 100644 internal/artifact/artifact_test.go create mode 100644 internal/auth/auth.go create mode 100644 internal/auth/auth_code.go create mode 100644 internal/auth/auth_code_test.go create mode 100644 internal/auth/auth_test.go create mode 100644 internal/config/config.go create mode 100644 internal/domain/domain.go create mode 100644 internal/domain/domain_test.go create mode 100644 internal/domain/resolver.go create mode 100644 internal/fixture/fixtures.go create mode 100644 internal/handlers/handlers.go create mode 100644 internal/handlers/handlers_test.go create mode 100644 internal/host/host.go create mode 100644 internal/host/host_test.go create mode 100644 internal/httperrors/httperrors.go create mode 100644 internal/httperrors/httperrors_test.go create mode 100644 internal/httprange/http_ranged_reader.go create mode 100644 internal/httprange/http_ranged_reader_test.go create mode 100644 internal/httprange/http_reader.go create mode 100644 internal/httprange/http_reader_test.go create mode 100644 internal/httprange/resource.go create mode 100644 internal/httprange/resource_test.go create mode 100644 internal/httptransport/LICENSE create mode 100644 internal/httptransport/trace.go create mode 100644 internal/httptransport/transport.go create mode 100644 internal/httptransport/transport_darwin.go create mode 100644 internal/httptransport/transport_test.go create mode 100644 internal/httputil/LICENSE create mode 100644 internal/httputil/README.md create mode 100644 internal/httputil/header/header.go create mode 100644 internal/httputil/negotiate.go create mode 100644 internal/interface.go create mode 100644 internal/jail/jail.go create mode 100644 internal/jail/jail_test.go create mode 100644 internal/jail/mount_linux.go create mode 100644 internal/jail/mount_not_supported.go create mode 100644 internal/logging/logging.go create mode 100644 internal/logging/logging_test.go create mode 100644 internal/middleware/headers.go create mode 100644 internal/middleware/headers_test.go create mode 100644 internal/mocks/mocks.go create mode 100644 internal/netutil/shared_limit_listener.go create mode 100644 internal/redirects/redirects.go create mode 100644 internal/redirects/redirects_benchmark_test.go create mode 100644 internal/redirects/redirects_test.go create mode 100644 internal/rejectmethods/middleware.go create mode 100644 internal/rejectmethods/middleware_test.go create mode 100644 internal/request/request.go create mode 100644 internal/request/request_test.go create mode 100644 internal/serving/disk/errors.go create mode 100644 internal/serving/disk/helpers.go create mode 100644 internal/serving/disk/local/serving.go create mode 100644 internal/serving/disk/local/serving_test.go create mode 100644 internal/serving/disk/reader.go create mode 100644 internal/serving/disk/reader_test.go create mode 100644 internal/serving/disk/serving.go create mode 100644 internal/serving/disk/symlink/LICENSE create mode 100644 internal/serving/disk/symlink/PATENTS create mode 100644 internal/serving/disk/symlink/README.md create mode 100644 internal/serving/disk/symlink/path_test.go create mode 100644 internal/serving/disk/symlink/shims.go create mode 100644 internal/serving/disk/symlink/symlink.go create mode 100644 internal/serving/disk/zip/serving.go create mode 100644 internal/serving/disk/zip/serving_test.go create mode 100644 internal/serving/handler.go create mode 100644 internal/serving/lookup_path.go create mode 100644 internal/serving/request.go create mode 100644 internal/serving/serverless/certs.go create mode 100644 internal/serving/serverless/cluster.go create mode 100644 internal/serving/serverless/director.go create mode 100644 internal/serving/serverless/errors.go create mode 100644 internal/serving/serverless/serverless.go create mode 100644 internal/serving/serverless/serverless_test.go create mode 100644 internal/serving/serverless/transport.go create mode 100644 internal/serving/serving.go create mode 100644 internal/source/config.go create mode 100644 internal/source/disk/config.go create mode 100644 internal/source/disk/config_test.go create mode 100644 internal/source/disk/custom.go create mode 100644 internal/source/disk/disk.go create mode 100644 internal/source/disk/domain_test.go create mode 100644 internal/source/disk/group.go create mode 100644 internal/source/disk/group_test.go create mode 100644 internal/source/disk/map.go create mode 100644 internal/source/disk/map_test.go create mode 100644 internal/source/domains.go create mode 100644 internal/source/domains_test.go create mode 100644 internal/source/gitlab/api/client.go create mode 100644 internal/source/gitlab/api/lookup.go create mode 100644 internal/source/gitlab/api/lookup_path.go create mode 100644 internal/source/gitlab/api/resolver.go create mode 100644 internal/source/gitlab/api/virtual_domain.go create mode 100644 internal/source/gitlab/cache/cache.go create mode 100644 internal/source/gitlab/cache/cache_test.go create mode 100644 internal/source/gitlab/cache/entry.go create mode 100644 internal/source/gitlab/cache/entry_test.go create mode 100644 internal/source/gitlab/cache/memstore.go create mode 100644 internal/source/gitlab/cache/retriever.go create mode 100644 internal/source/gitlab/cache/retriever_test.go create mode 100644 internal/source/gitlab/cache/store.go create mode 100644 internal/source/gitlab/client/client.go create mode 100644 internal/source/gitlab/client/client_stub.go create mode 100644 internal/source/gitlab/client/client_test.go create mode 100644 internal/source/gitlab/client/config.go create mode 100644 internal/source/gitlab/client/testdata/test.gitlab.io.json create mode 100644 internal/source/gitlab/factory.go create mode 100644 internal/source/gitlab/factory_test.go create mode 100644 internal/source/gitlab/gitlab.go create mode 100644 internal/source/gitlab/gitlab_poll.go create mode 100644 internal/source/gitlab/gitlab_poll_test.go create mode 100644 internal/source/gitlab/gitlab_test.go create mode 100644 internal/source/source.go create mode 100644 internal/source/source_mock.go create mode 100644 internal/testhelpers/chdir.go create mode 100644 internal/testhelpers/testhelpers.go create mode 100644 internal/testhelpers/tmpdir.go create mode 100644 internal/tlsconfig/tlsconfig.go create mode 100644 internal/tlsconfig/tlsconfig_test.go create mode 100644 internal/validateargs/validateargs.go create mode 100644 internal/validateargs/validateargs_test.go create mode 100644 internal/vfs/errors.go create mode 100644 internal/vfs/file.go create mode 100644 internal/vfs/local/root.go create mode 100644 internal/vfs/local/root_test.go create mode 100644 internal/vfs/local/testdata/file create mode 120000 internal/vfs/local/testdata/link create mode 100644 internal/vfs/local/vfs.go create mode 100644 internal/vfs/local/vfs_test.go create mode 100644 internal/vfs/root.go create mode 100644 internal/vfs/vfs.go create mode 100644 internal/vfs/zip/archive.go create mode 100644 internal/vfs/zip/archive_test.go create mode 100644 internal/vfs/zip/deflate_reader.go create mode 100644 internal/vfs/zip/lru_cache.go create mode 100644 internal/vfs/zip/vfs.go create mode 100644 internal/vfs/zip/vfs_test.go create mode 100644 main.go create mode 100644 metrics/metrics.go create mode 100644 multi_string_flag.go create mode 100644 multi_string_flag_test.go create mode 100644 server.go create mode 100644 shared/invalid-pages/.update/.gitkeep create mode 100644 shared/lookups/new-source-test.gitlab.io.json create mode 100644 shared/lookups/zip-malformed.gitlab.io.json create mode 100644 shared/lookups/zip-not-found.gitlab.io.json create mode 100644 shared/lookups/zip.gitlab.io.json create mode 100644 shared/pages/.hidden.group/project/.gitkeep create mode 100644 shared/pages/@hashed/hashed.gitlab.io/config.json create mode 100644 shared/pages/@hashed/hashed.gitlab.io/public/index.html create mode 100644 shared/pages/CapitalGroup/CapitalProject/public/index.html create mode 100644 shared/pages/CapitalGroup/project/public/index.html create mode 100644 shared/pages/README.md create mode 100644 shared/pages/group.404/domain.404/config.json create mode 100644 shared/pages/group.404/domain.404/public/404.html create mode 100644 shared/pages/group.404/group.404.gitlab-example.com/public/404.html create mode 100644 shared/pages/group.404/group.404.test.io/public/404.html create mode 100644 shared/pages/group.404/private_project/config.json create mode 100644 shared/pages/group.404/private_project/public/404.html create mode 100644 shared/pages/group.404/private_unauthorized/config.json create mode 100644 shared/pages/group.404/private_unauthorized/public/404.html create mode 120000 shared/pages/group.404/project.404.symlink/public/404.html create mode 100644 shared/pages/group.404/project.404/public/404.html create mode 100644 shared/pages/group.404/project.no.404/public/index.html create mode 100644 shared/pages/group.acme/with.acme.challenge/config.json create mode 100644 shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/existingtoken create mode 100644 shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/foldertoken/index.html create mode 100644 shared/pages/group.acme/with.acme.challenge/public/index.html create mode 100644 shared/pages/group.auth/group.auth.gitlab-example.com/config.json create mode 100644 shared/pages/group.auth/group.auth.gitlab-example.com/public/404.html create mode 100644 shared/pages/group.auth/group.auth.gitlab-example.com/public/index.html create mode 100644 shared/pages/group.auth/group.auth.gitlab-example.com/public/private.project/index.html create mode 100644 shared/pages/group.auth/private.project.1/config.json create mode 100644 shared/pages/group.auth/private.project.1/public/404.html create mode 100644 shared/pages/group.auth/private.project.1/public/index.html create mode 100644 shared/pages/group.auth/private.project.2/config.json create mode 100644 shared/pages/group.auth/private.project.2/public/index.html create mode 100644 shared/pages/group.auth/private.project/config.json create mode 100644 shared/pages/group.auth/private.project/public/index.html create mode 100644 shared/pages/group.auth/subgroup/private.project.1/config.json create mode 100644 shared/pages/group.auth/subgroup/private.project.1/public/index.html create mode 100644 shared/pages/group.auth/subgroup/private.project.2/config.json create mode 100644 shared/pages/group.auth/subgroup/private.project.2/public/index.html create mode 100644 shared/pages/group.auth/subgroup/private.project/config.json create mode 100644 shared/pages/group.auth/subgroup/private.project/public/index.html create mode 100644 shared/pages/group.deleted/is_file.txt create mode 100644 shared/pages/group.deleted/project.deleted/public/.gitkeep create mode 100644 shared/pages/group.hidden/.hidden.project/public/.gitkeep create mode 100644 shared/pages/group.https-only/project1/config.json create mode 100644 shared/pages/group.https-only/project1/public/index.html create mode 100644 shared/pages/group.https-only/project2/config.json create mode 100644 shared/pages/group.https-only/project2/public/index.html create mode 100644 shared/pages/group.https-only/project3/config.json create mode 100644 shared/pages/group.https-only/project3/public/index.html create mode 100644 shared/pages/group.https-only/project4/config.json create mode 100644 shared/pages/group.https-only/project4/public/index.html create mode 100644 shared/pages/group.https-only/project5/config.json create mode 100644 shared/pages/group.https-only/project5/public/index.html create mode 100644 shared/pages/group.internal/project.internal/public/.gitkeep create mode 100644 shared/pages/group.no.projects/.gitkeep create mode 100644 shared/pages/group.no.public/project/.gitkeep create mode 100644 shared/pages/group.redirects/custom-domain/config.json create mode 100644 shared/pages/group.redirects/custom-domain/public/_redirects create mode 100644 shared/pages/group.redirects/group.redirects.gitlab-example.com/public/_redirects create mode 100644 shared/pages/group.redirects/project-redirects/public/_redirects create mode 100644 shared/pages/group.redirects/project-redirects/public/file-override.html create mode 100644 shared/pages/group.redirects/project-redirects/public/index.html create mode 100644 shared/pages/group.redirects/project-redirects/public/magic-land.html create mode 100644 shared/pages/group/CapitalProject/public/index.html create mode 100644 shared/pages/group/group.gitlab-example.com/public/index.html create mode 100644 shared/pages/group/group.gitlab-example.com/public/index.html.br create mode 100644 shared/pages/group/group.gitlab-example.com/public/index.html.gz create mode 100644 shared/pages/group/group.gitlab-example.com/public/project/index.html create mode 100644 shared/pages/group/group.test.io/.gitkeep create mode 100644 shared/pages/group/group.test.io/config.json create mode 100644 shared/pages/group/group.test.io/public/.gitkeep create mode 100644 shared/pages/group/group.test.io/public/gz-symlink create mode 120000 shared/pages/group/group.test.io/public/gz-symlink.br create mode 120000 shared/pages/group/group.test.io/public/gz-symlink.gz create mode 100644 shared/pages/group/group.test.io/public/image-nogzip.unknown create mode 100644 shared/pages/group/group.test.io/public/image.unknown create mode 100644 shared/pages/group/group.test.io/public/image.unknown.br create mode 100644 shared/pages/group/group.test.io/public/image.unknown.gz create mode 100644 shared/pages/group/group.test.io/public/index.html create mode 100644 shared/pages/group/group.test.io/public/index.html.br create mode 100644 shared/pages/group/group.test.io/public/index.html.gz create mode 100644 shared/pages/group/group.test.io/public/index2.html create mode 100644 shared/pages/group/group.test.io/public/index2.html.gz create mode 100644 shared/pages/group/group.test.io/public/project2/index.html create mode 100644 shared/pages/group/group.test.io/public/text-nogzip.unknown create mode 100644 shared/pages/group/group.test.io/public/text.unknown create mode 100644 shared/pages/group/group.test.io/public/text.unknown.br create mode 100644 shared/pages/group/group.test.io/public/text.unknown.gz create mode 100644 shared/pages/group/new-source-test.gitlab.io/public/index.html create mode 100644 shared/pages/group/project/public/file.webmanifest create mode 100644 shared/pages/group/project/public/index.html create mode 100644 shared/pages/group/project/public/subdir/index.html create mode 100644 shared/pages/group/project2/public/index.html create mode 100644 shared/pages/group/project2/public/subdir/index.html create mode 100644 shared/pages/group/serving/public/index.html create mode 100644 shared/pages/group/subgroup/project/public/index.html create mode 100644 shared/pages/group/subgroup/project/public/subdir/index.html create mode 100644 shared/pages/group/zip.gitlab.io/public-without-dirs.zip create mode 100644 shared/pages/group/zip.gitlab.io/public.zip create mode 100644 shared/pages/is_file create mode 100644 test/acceptance/acceptance_test.go create mode 100644 test/acceptance/acme_test.go create mode 100644 test/acceptance/artifacts_test.go create mode 100644 test/acceptance/auth_test.go create mode 100644 test/acceptance/config_test.go create mode 100644 test/acceptance/encodings_test.go create mode 100644 test/acceptance/helpers_test.go create mode 100644 test/acceptance/metrics_test.go create mode 100644 test/acceptance/proxyv2_test.go create mode 100644 test/acceptance/redirects_test.go create mode 100644 test/acceptance/serving_test.go create mode 100644 test/acceptance/status_test.go create mode 100644 test/acceptance/stub_test.go create mode 100644 test/acceptance/tls_test.go create mode 100644 test/acceptance/unknown_http_method_test.go create mode 100644 test/acceptance/zip_test.go create mode 100644 tools.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..e3e689d12 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +# Created by .ignore support plugin (hsz.mobi) +shared/pages/.update +/gitlab-pages +/vendor +/gitlab-pages.conf +/gl-code-quality-report.json +/gl-license-scanning-report.json +/coverage.html +/junit-test-report.xml +/tests.out + +# Used by the makefile +/.GOPATH +/bin diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000..08a5a6411 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,33 @@ +stages: + - prepare + - test + +workflow: + rules: + # For merge requests, create a pipeline. + - if: '$CI_MERGE_REQUEST_IID' + # For `master` branch, create a pipeline (this includes on schedules, pushes, merges, etc.). + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + # For tags, create a pipeline. + - if: '$CI_COMMIT_TAG' + # For stable, and security branches, create a pipeline. + - if: '$CI_COMMIT_BRANCH =~ /^[\d-]+-stable(-ee)?$/' + - if: '$CI_COMMIT_BRANCH =~ /^security\//' + +include: + - local: .gitlab/ci/prepare.yml + - local: .gitlab/ci/test.yml + +default: + image: golang:1.13 + tags: + - gitlab-org + +.go-mod-cache: + variables: + GOPATH: $CI_PROJECT_DIR/.GOPATH + before_script: + - mkdir -p .GOPATH + cache: + paths: + - .GOPATH/pkg/mod/ diff --git a/.gitlab/ci/prepare.yml b/.gitlab/ci/prepare.yml new file mode 100644 index 000000000..1b1347ea9 --- /dev/null +++ b/.gitlab/ci/prepare.yml @@ -0,0 +1,52 @@ +# TODO: use versioned templates https://gitlab.com/gitlab-org/gitlab-pages/-/issues/456 +include: + - template: Security/License-Scanning.gitlab-ci.yml + - template: Security/SAST.gitlab-ci.yml + - template: Security/Dependency-Scanning.gitlab-ci.yml + - template: Security/Secret-Detection.gitlab-ci.yml + +# workflow rules are not extended by scanner jobs, need to override them manually +# TODO: remove when https://gitlab.com/gitlab-org/gitlab/-/issues/218444 is done + +.rules-for-scanners: &rules-for-scanners + stage: prepare + rules: + # For merge requests, create a pipeline. + - if: '$CI_MERGE_REQUEST_IID' + # For `master` branch, create a pipeline (this includes on schedules, pushes, merges, etc.). + - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + # For tags, create a pipeline. + - if: '$CI_COMMIT_TAG' + # For stable, and security branches, create a pipeline. + - if: '$CI_COMMIT_BRANCH =~ /^[\d-]+-stable(-ee)?$/' + - if: '$CI_COMMIT_BRANCH =~ /^security\//' + +license_scanning: + <<: *rules-for-scanners + +gemnasium-dependency_scanning: + <<: *rules-for-scanners + +secret_detection: + stage: prepare + rules: + # For merge requests, create a pipeline. + - if: '$CI_MERGE_REQUEST_IID' + +gosec-sast: + <<: *rules-for-scanners + +# disable eslint-sast since html files are fixtures for testing +eslint-sast: + rules: + - when: never + +download deps: + extends: .go-mod-cache + stage: prepare + script: + - make deps-download + artifacts: + paths: + - go.mod + - go.sum diff --git a/.gitlab/ci/test.yml b/.gitlab/ci/test.yml new file mode 100644 index 000000000..74d49ee6b --- /dev/null +++ b/.gitlab/ci/test.yml @@ -0,0 +1,88 @@ +.tests-common: + extends: .go-mod-cache + stage: test + tags: + - gitlab-org-docker + needs: ['download deps'] + artifacts: + reports: + junit: junit-test-report.xml + +.tests-unit: + extends: .tests-common + script: + - echo "Running all tests without daemonizing..." + - make setup + - make test + - make junit-report + +.tests-acceptance-deamon: + extends: .tests-common + script: + - make setup + - echo "Running just the acceptance tests daemonized (tmpdir)...." + - TEST_DAEMONIZE=tmpdir make acceptance + - echo "Running just the acceptance tests daemonized (inplace)...." + - TEST_DAEMONIZE=inplace make acceptance + - make junit-report + +test:1.13: + extends: .tests-unit + image: golang:1.13 + +test-acceptance:1.13: + extends: .tests-acceptance-deamon + image: golang:1.13 + +test:1.14: + extends: .tests-unit + image: golang:1.14 + +test-acceptance:1.14: + extends: .tests-acceptance-deamon + image: golang:1.14 + +test:1.15: + extends: .tests-unit + image: golang:1.15 +test-acceptance:1.15: + extends: .tests-acceptance-deamon + image: golang:1.15 + +race: + extends: .tests-common + script: + - echo "Running race detector" + - make race + +cover: + extends: .tests-common + script: + - make setup + - make generate-mocks + - make cover + coverage: '/total:.+\(statements\).+\d+\.\d+/' + artifacts: + paths: + - coverage.html + +code_quality: + extends: .tests-common + image: golangci/golangci-lint:v1.27.0 + variables: + REPORT_FILE: gl-code-quality-report.json + LINT_FLAGS: "--color never --deadline 15m" + OUT_FORMAT: code-climate + script: + - golangci-lint run ./... --out-format ${OUT_FORMAT} ${LINT_FLAGS} | tee ${REPORT_FILE} + timeout: 15 minutes + artifacts: + reports: + codequality: ${REPORT_FILE} + paths: + - ${REPORT_FILE} + +check deps: + extends: .tests-common + script: + - make deps-check diff --git a/.gitlab/issue_templates/release.md b/.gitlab/issue_templates/release.md new file mode 100644 index 000000000..f1551178e --- /dev/null +++ b/.gitlab/issue_templates/release.md @@ -0,0 +1,64 @@ +- [ ] Set the milestone on this issue +- [ ] Review the list of changes since the last release and fill below: + - [ ] **In the changelog** + - [ ] **Not in the changelog** + + Hint: + ``` + git --no-pager log --merges --pretty=oneline master...vX.Y.Z + ``` +- Decide on the version number by reference to + the [Versioning](https://gitlab.com/gitlab-org/gitlab-pages/blob/master/PROCESS.md#versioning) + * Typically if you want to release code from current `master` branch you will update `MINOR` version, e.g. `1.12.0` -> `1.13.0`. In that case you **don't** need to create stable branch + * If you want to backport some bug fix or security fix you will need to update stable branch `X-Y-stable` +- [ ] Create an MR for [gitlab-pages project](https://gitlab.com/gitlab-org/gitlab-pages). + You can use [this MR](https://gitlab.com/gitlab-org/gitlab-pages/merge_requests/217) as an example. + - [ ] Update `VERSION` + - [ ] Update `CHANGELOG` + - [ ] Assign to reviewer +- [ ] Once `gitlab-pages` is merged create a signed+annotated tag pointing to the **merge commit** on the **stable branch** + In case of `master` branch: + ```shell + git fetch origin master + git fetch dev master + git tag -a -s -m "Release v1.0.0" v1.0.0 origin/master + ``` + In case of `stable` branch: + ```shell + git fetch origin 1-0-stable + git fetch dev 1-0-stable + git tag -a -s -m "Release v1.0.0" v1.0.0 origin/1-0-stable + ``` +- [ ] Verify that you created tag properly: + ```shell + git show v1.0.0 + ``` + it should include something like: + * ```(tag: v1.0.0, origin/master, dev/master, master)``` for `master` + * ```(tag: v1.0.1, origin/1-0-stable, dev/1-0-stable, 1-0-stable)``` for `stable` branch +- [ ] Push this tag to origin(**Skip this for security release!**) + ```shell + git push origin v1.0.0 + ``` +- [ ] Wait for tag to be mirrored to `dev` or push it: + ```shell + git push dev v1.0.0 + ``` +- [ ] Create an MR for [gitlab project](https://gitlab.com/gitlab-org/gitlab). + You can use [this MR](https://gitlab.com/gitlab-org/gitlab/merge_requests/23023) as an example. + - [ ] Update `GITLAB_PAGES_VERSION` + - [ ] Create a changelog entry + - [ ] Assign to a reviewer + +### In the changelog +``` +- some change +- some change +``` +### Not in the changelog +``` +- some change +- some change +``` + +/label ~backend ~backstage ~"Category:Pages" ~"devops::release" ~"group::release management" diff --git a/.gitlab/merge_request_templates/Security Release.md b/.gitlab/merge_request_templates/Security Release.md new file mode 100644 index 000000000..df832c7d4 --- /dev/null +++ b/.gitlab/merge_request_templates/Security Release.md @@ -0,0 +1,33 @@ + +## Related issues + + + +## Developer checklist + +- [ ] Link to the original confidential issue on https://gitlab.com/gitlab-org/gitlab-pages. **Warning don't associate this MR with the security implementation issue on GitLab Security** +- [ ] MR targets `master`, or `X-Y-stable` for backports +- [ ] Milestone is set for the version this MR applies to +- [ ] Title of this MR is the same as for all backports +- [ ] A CHANGELOG entry is added +- [ ] Add a link to this MR in the `links` section of related issue +- [ ] Create a merge request in [GitLab Security](https://gitlab.com/gitlab-org/security/gitlab) bumping GitLab pages version: MR_LINK_HERE +- [ ] Assign to a Pages maintainer for review and merge + +## Reviewer checklist + +- [ ] Correct milestone is applied and the title is matching across all backports +- [ ] Merge this merge request +- [ ] Create corresponding tag and push it to https://gitlab.com/gitlab-org/security/gitlab-pages + +/label ~security diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..2b97de091 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,63 @@ +run: + concurrency: 8 + deadline: 1m + issues-exit-code: 1 + modules-download-mode: readonly + tests: true + skip-dirs: + - vendor + - internal/httputil # from github.com/golang/gddo + - internal/serving/disk/symlink + skip-files: + - mock_*.go + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true + +linters-settings: + gocyclo: + min-complexity: 10 + govet: + check-shadowing: false + goconst: + min-len: 3 + min-occurrences: 3 + goimports: + local-prefixes: gitlab.com/gitlab-org/gitlab-pages + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - dogsled + - goconst + - gocyclo + - goimports + - golint + - gosimple + - govet + - gosec + - ineffassign + - misspell + - structcheck + - typecheck + - unconvert + - unused + - varcheck + - whitespace + fast: false + +issues: +# # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: ".*_test.go" + linters: + - bodyclose + - gosec + - goconst + - path: "internal/fixture/fixtures.go" + linters: + - gosec diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 000000000..63d9ded15 --- /dev/null +++ b/.tool-versions @@ -0,0 +1 @@ +golang 1.15.5 diff --git a/CHANGELOG b/CHANGELOG new file mode 100644 index 000000000..e315ddcc0 --- /dev/null +++ b/CHANGELOG @@ -0,0 +1,336 @@ +v 1.34.0 + +- Allow DELETE HTTP method + +v 1.33.0 + +- Reject requests with unknown HTTP methods +- Encrypt OAuth code during auth flow + +v 1.32.0 + +- Try to automatically use gitlab API as a source for domain information !402 +- Fix https redirect loop for PROXYv2 protocol !405 + +v 1.31.0 + +- Support for HTTPS over PROXYv2 protocol !278 +- Update LabKit library to v1.0.0 !397 +- Add zip serving configuration flags !392 +- Disable deprecated serverless serving and proxy !400 + +v 1.30.2 + +- Allow DELETE HTTP method + +v 1.30.1 + +- Reject requests with unknown HTTP methods +- Encrypt OAuth code during auth flow + +v 1.30.0 + +- Allow to refresh an existing cached archive when accessed !375 + +v 1.29.0 + +- Fix LRU cache metrics !379 +- Upgrade go-mimedb to support new types including avif images !353 +- Return 5xx instead of 404 if pages zip serving is unavailable !381 +- Make timeouts for ZIP VFS configurable !385 +- Improve httprange timeouts !382 +- Fix caching for errored ZIP VFS archives !384 + +v 1.28.2 + +- Allow DELETE HTTP method + +v 1.28.1 + +- Reject requests with unknown HTTP methods +- Encrypt OAuth code during auth flow + +v 1.28.0 + +- Implement basic redirects via _redirects text file !367 +- Add support for pre-compressed brotly files !359 +- Add serving type to log !369 +- Improve performance of ZIP serving !364 +- Fix support for archives without directory structure !373 + +v 1.27.0 + +- Add more metrics for zip serving !363 !338 + +v 1.26.0 + +- Add the ability to serve web-sites from the zip archive stored in object storage !351 + +v 1.25.0 + +- No user-facing changes + +v 1.24.0 + +- Unshare mount namespaces when creating jail !342 + +v 1.23.0 + +- Add VFS for local disk !324 +- Fully support `domain-config-source=gitlab` !332 + +v 1.22.0 + +- Serve custom 404.html file for namespace domains !263 +- Poll internal status API !304 !306 +- Enable `domain-config-source=disk` by default Use domain config source disk !305 +- Set Content-Length when Content-Encoding is set !227 + +v 1.21.0 + +- Copy certs from SSL_CERT_DIR into chroot jail !291 + +v 1.20.0 + +- Enable continuous profiling !297 + +v 1.19.0 + +- Add file size metric for disk serving !294 +- Add pprof to metrics endpoint !271 + +v 1.18.0 + +- Fix proxying artifacts with escaped characters !255 +- Introduce internal-gitlab-server flag to allow using the internal network for communicating to the GitLab server !276 +- Increase maximum idle connections pool size from 2 to 100 !274 +- Disable passing auth-related secret parameters as command line flags !269 +- Fix unused idle API connection bug !275 + +v 1.17.0 + +- Extract health check in its own middleware !247 +- Increase GitLab internal API response timeout !253 +- Add support for proxying GitLab serverless requests !232 + +v 1.16.0 + +- Add metrics for GitLab API calls !229 +- Change the way proxy headers like `X-Forwarded-For` are handled !225 + +v 1.15.0 + +- Implement support for incremental rollout of the new API based configuration source +- Add domain configuration duration (from disk) to the exported Prometheus metrics +- Make GitLab API client timeout and JWT expiry configurable + +v 1.14.0 + +- Rollback godirwalk to v1.10.12 due to significant performance degradation + +v 1.13.0 + +- Implement API based configuration source (not yet used) +- Update godirwalk to v1.14.0 + +v 1.12.0 + +- Add minimal support for the api-secret-key config flag (not yet used) +- Add warnings about secrets given through command-line flags +- Remove Admin gRPC api (was never used) + +v 1.11.0 + +- Refactor domain package and extract disk serving !189 +- Separate domain config source !188 + +v 1.10.0 + +- Add support for previewing artifacts that are not public !134 + +v 1.9.0 + +- Add full HTTP metrics and logging to GitLab pages using LabKit + +v 1.8.1 + +- Limit auth cookie max-age to 10 minutes +- Use secure cookies for auth session + +v 1.8.0 + +- Fix https downgrade in auth process +- Fix build under go-pie environment +- Change Prometheus metrics names +- Require minimum golang version 1.11 to build +- Add the ability to define custom HTTP headers for all served sites + +v 1.7.2 + +- Fix https to http downgrade for auth process +- Limit auth cookie max-age to 10 minutes +- Use secure cookies for auth session + +v 1.7.1 + +- Security fix for recovering gitlab access token from cookies + +v 1.7.0 + +- Add support for Sentry error reporting + +v 1.6.3 + +- Fix https to http downgrade for auth process +- Limit auth cookie max-age to 10 minutes +- Use secure cookies for auth session + +v 1.6.2 + +- Security fix for recovering gitlab access token from cookies + +v 1.6.1 + +- Fix serving acme challenges with index.html + +v 1.6.0 + +- Use proxy from environment for http request !131 +- Use STDOUT for flag outputs !132 +- Prepare pages auth logs for production rollout !138 +- Redirect unknown ACME challenges to the GitLab instance !141 +- Disable 3DES and other insecure cipher suites !145 +- Provide ability to disable old TLS versions !146 + +v 1.5.1 + +- Security fix for recovering gitlab access token from cookies + +v 1.5.0 + +- Make extensionless URLs work !112 + +v 1.4.0 +- Prevent wrong mimetype being set for GZipped files with unknown file extension !122 +- Pages for subgroups !123 +- Make content-type detection consistent between file types !126 + +v 1.3.1 +- Fix TOCTOU race condition when serving files + +v 1.3.0 +- Allow the maximum connection concurrency to be set !117 +- Update Prometheus vendoring to v0.9 !116 +- Fix version string not showing properly !115 + +v 1.2.1 +- Fix 404 for project with capital letters !114 + +v 1.2.0 +- Stop serving shadowed namespace project files !111 +- Make GitLab pages support access control !94 + +v 1.1.0 +- Fix HTTP to HTTPS redirection not working for default domains !106 +- Log duplicate domain names !107 +- Abort domain scan if a failure is encountered !102 +- Update Prometheus vendoring !105 + +v 1.0.0 +- Use permissive unix socket permissions !95 +- Fix logic for output of domains in debug mode !98 +- Add support for reverse proxy header X-Forwarded-Host !99 + +v 0.9.1 +- Clean up the created jail directory if building the jail doesn't work !90 +- Restore the old in-place chroot behaviour as a command-line option !92 +- Create /dev/random and /dev/urandom when daemonizing and jailing !93 + +v 0.9.0 +- Add gRPC admin health check !85 + +v 0.8.0 +- Add /etc/resolv.conf and /etc/ssl/certs to pages chroot !51 +- Avoid unnecessary stat calls when building domain maps !60 +- Parallelize IO during the big project scan !61 +- Add more logging to gitlab pages daemon !62 +- Remove noisy debug logs !65 +- Don't log request or referer query strings !77 +- Make certificate parsing thread-safe !79 + +v 0.7.1 +- Fix nil reference error when project is not in config.json !70 + +v 0.7.0 +- HTTPS-only pages !50 +- Switch to govendor !54 +- Add logrus !55 +- Structured logging !56 +- Use https://github.com/jshttp/mime-db to populate the mimedb !57 + +v 0.6.1 +- Sanitize redirects by issuing a complete URI + +v 0.6.0 +- Use namsral/flag to support environment vars for config !40 +- Cleanup the README file !41 +- Add an artifacts proxy to GitLab Pages !44 !46 +- Resolve "'cannot find package' when running make" !45 + +v 0.5.1 +- Don't serve statically-compiled `.gz` files that are symlinks + +v 0.5.0 +- Don't try to update domains if reading the update file fails !32 +- Add CORS support to GET requests !33 +- Add CONTRIBUTING.md !34 +- Add basic cache directives to gitlab-pages !35 +- Go 1.8 is the minimum supported version !36 +- Fix HTTP/2 ALPN negotiation !37 +- Add disabled-by-default status check endpoint !39 + +v 0.4.3 +- Fix domain lookups when Pages is exposed on non-default ports + +v 0.4.2 +- Support for statically compressed gzip content-encoding + +v 0.4.1 +- Fix reading configuration for multiple custom domains + +v 0.4.0 +- Fix the `-redirect-http` option so it redirects from HTTP to HTTPS when enabled !21 + +v 0.3.2 +- Only pass a metrics fd to the daemon child if a metrics address was specified + +v 0.3.1 +- Pass the metrics address fd to the child process + +v 0.3.0 +- Prometheus metrics support with `metrics-address` + +v 0.2.5 +- Allow listen-http, listen-https and listen-proxy to be specified multiple times + +v 0.2.4 +- Fix predefined 404 page content-type + +v 0.2.3 +- Add `-version` to command line + +v 0.2.2 +- Fix predefined 404 page content-type + +v 0.2.1 +- Serve nice GitLab branded 404 page +- Present user's error page for 404: put the 404.html in root of your pages + +v 0.2.0 +- Execute the unprivileged pages daemon in chroot + +v 0.1.0 +- Allow to run the pages daemon unprivileged (-daemon-uid, -daemon-gid) + +v 0.0.0 +- Initial release diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..0d6f65777 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,613 @@ +## Developer Certificate of Origin + License + +By contributing to GitLab B.V., You accept and agree to the following terms and +conditions for Your present and future Contributions submitted to GitLab B.V. +Except for the license granted herein to GitLab B.V. and recipients of software +distributed by GitLab B.V., You reserve all right, title, and interest in and to +Your Contributions. All Contributions are subject to the following DCO + License +terms. + +[DCO + License](https://gitlab.com/gitlab-org/dco/blob/master/README.md) + +All Documentation content that resides under the [doc/ directory](/doc) of this +repository is licensed under Creative Commons: +[CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/). + +_This notice should stay as the first item in the CONTRIBUTING.md file._ + +--- + + + +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* + +- [Contribute to GitLab](#contribute-to-gitlab) +- [Security vulnerability disclosure](#security-vulnerability-disclosure) +- [Closing policy for issues and merge requests](#closing-policy-for-issues-and-merge-requests) +- [Helping others](#helping-others) +- [I want to contribute!](#i-want-to-contribute) +- [Workflow labels](#workflow-labels) + - [Type labels (~"feature proposal", ~bug, ~customer, etc.)](#type-labels-feature-proposal-bug-customer-etc) + - [Subject labels (~wiki, ~"container registry", ~ldap, ~api, etc.)](#subject-labels-wiki-container-registry-ldap-api-etc) + - [Team labels (~CI, ~Discussion, ~Edge, ~Platform, etc.)](#team-labels-ci-discussion-edge-platform-etc) + - [Priority labels (~Deliverable and ~Stretch)](#priority-labels-deliverable-and-stretch) + - [Label for community contributors (~"Accepting Merge Requests")](#label-for-community-contributors-accepting-merge-requests) +- [Implement design & UI elements](#implement-design--ui-elements) +- [Issue tracker](#issue-tracker) + - [Issue triaging](#issue-triaging) + - [Feature proposals](#feature-proposals) + - [Issue tracker guidelines](#issue-tracker-guidelines) + - [Issue weight](#issue-weight) + - [Regression issues](#regression-issues) + - [Technical debt](#technical-debt) + - [Stewardship](#stewardship) +- [Merge requests](#merge-requests) + - [Merge request guidelines](#merge-request-guidelines) + - [Contribution acceptance criteria](#contribution-acceptance-criteria) +- [Definition of done](#definition-of-done) +- [Style guides](#style-guides) +- [Code of conduct](#code-of-conduct) + + + +--- + +## Contribute to GitLab + +Thank you for your interest in contributing to GitLab. This guide details how +to contribute to GitLab in a way that is efficient for everyone. + +GitLab comes into two flavors, GitLab Community Edition (CE) our free and open +source edition, and GitLab Enterprise Edition (EE) which is our commercial +edition. Throughout this guide you will see references to CE and EE for +abbreviation. + +If you have read this guide and want to know how the GitLab [core team] +operates please see [the GitLab contributing process](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/PROCESS.md). + +- [GitLab Inc engineers should refer to the engineering workflow document](https://about.gitlab.com/handbook/engineering/workflow/) + +## Security vulnerability disclosure + +Please report suspected security vulnerabilities in private to +`support@gitlab.com`, also see the +[disclosure section on the GitLab.com website](https://about.gitlab.com/disclosure/). +Please do **NOT** create publicly viewable issues for suspected security +vulnerabilities. + +## Closing policy for issues and merge requests + +GitLab is a popular open source project and the capacity to deal with issues +and merge requests is limited. Out of respect for our volunteers, issues and +merge requests not in line with the guidelines listed in this document may be +closed without notice. + +Please treat our volunteers with courtesy and respect, it will go a long way +towards getting your issue resolved. + +Issues and merge requests should be in English and contain appropriate language +for audiences of all ages. + +If a contributor is no longer actively working on a submitted merge request +we can decide that the merge request will be finished by one of our +[Merge request coaches][team] or close the merge request. We make this decision +based on how important the change is for our product vision. If a Merge request +coach is going to finish the merge request we assign the +~"coach will finish" label. + +## Helping others + +Please help other GitLab users when you can. The channels people will reach out +on can be found on the [getting help page][getting-help]. + +Sign up for the mailing list, answer GitLab questions on StackOverflow or +respond in the IRC channel. You can also sign up on [CodeTriage][codetriage] to help with +the remaining issues on the GitHub issue tracker. + +## I want to contribute! + +If you want to contribute to GitLab, but are not sure where to start, +look for [issues with the label `Accepting Merge Requests` and weight < 5][accepting-mrs-weight]. +These issues will be of reasonable size and challenge, for anyone to start +contributing to GitLab. + +## Workflow labels + +To allow for asynchronous issue handling, we use [milestones][milestones-page] +and [labels][labels-page]. Leads and product managers handle most of the +scheduling into milestones. Labelling is a task for everyone. + +Most issues will have labels for at least one of the following: + +- Type: ~"feature proposal", ~bug, ~customer, etc. +- Subject: ~wiki, ~"container registry", ~ldap, ~api, etc. +- Team: ~CI, ~Discussion, ~Edge, ~Frontend, ~Platform, etc. +- Priority: ~Deliverable, ~Stretch + +All labels, their meaning and priority are defined on the +[labels page][labels-page]. + +If you come across an issue that has none of these, and you're allowed to set +labels, you can _always_ add the team and type, and often also the subject. + +[milestones-page]: https://gitlab.com/gitlab-org/gitlab-pages/milestones +[labels-page]: https://gitlab.com/gitlab-org/gitlab-pages/labels + +### Type labels (~"feature proposal", ~bug, ~customer, etc.) + +Type labels are very important. They define what kind of issue this is. Every +issue should have one or more. + +Examples of type labels are ~"feature proposal", ~bug, ~customer, ~security, +and ~"direction". + +A number of type labels have a priority assigned to them, which automatically +makes them float to the top, depending on their importance. + +Type labels are always lowercase, and can have any color, besides blue (which is +already reserved for subject labels). + +The descriptions on the [labels page][labels-page] explain what falls under each type label. + +### Subject labels (~wiki, ~"container registry", ~ldap, ~api, etc.) + +Subject labels are labels that define what area or feature of GitLab this issue +hits. They are not always necessary, but very convenient. + +If you are an expert in a particular area, it makes it easier to find issues to +work on. You can also subscribe to those labels to receive an email each time an +issue is labelled with a subject label corresponding to your expertise. + +Examples of subject labels are ~wiki, ~"container registry", ~ldap, ~api, +~issues, ~"merge requests", ~labels, and ~"container registry". + +Subject labels are always all-lowercase. + +### Team labels (~CI, ~Discussion, ~Edge, ~Platform, etc.) + +Team labels specify what team is responsible for this issue. +Assigning a team label makes sure issues get the attention of the appropriate +people. + +The current team labels are ~Build, ~CI, ~Discussion, ~Documentation, ~Edge, +~Gitaly, ~Platform, ~Prometheus, ~Release, and ~"UX". + +The descriptions on the [labels page][labels-page] explain what falls under the +responsibility of each team. + +Within those team labels, we also have the ~backend and ~frontend labels to +indicate if an issue needs backend work, frontend work, or both. + +Team labels are always capitalized so that they show up as the first label for +any issue. + +### Priority labels (~Deliverable and ~Stretch) + +Priority labels help us clearly communicate expectations of the work for the +release. There are two levels of priority labels: + +- ~Deliverable: Issues that are expected to be delivered in the current + milestone. +- ~Stretch: Issues that are a stretch goal for delivering in the current + milestone. If these issues are not done in the current release, they will + strongly be considered for the next release. + +### Label for community contributors (~"Accepting Merge Requests") + +Issues that are beneficial to our users, 'nice to haves', that we currently do +not have the capacity for or want to give the priority to, are labeled as +~"Accepting Merge Requests", so the community can make a contribution. + +Community contributors can submit merge requests for any issue they want, but +the ~"Accepting Merge Requests" label has a special meaning. It points to +changes that: + +1. We already agreed on, +1. Are well-defined, +1. Are likely to get accepted by a maintainer. + +We want to avoid a situation when a contributor picks an +~"Accepting Merge Requests" issue and then their merge request gets closed, +because we realize that it does not fit our vision, or we want to solve it in a +different way. + +We add the ~"Accepting Merge Requests" label to: + +- Low priority ~bug issues (i.e. we do not add it to the bugs that we want to +solve in the ~"Next Patch Release") +- Small ~"feature proposal" that do not need ~UX / ~"Product work", or for which +the ~UX / ~"Product work" is already done +- Small ~"technical debt" issues + +After adding the ~"Accepting Merge Requests" label, we try to estimate the +[weight](#issue-weight) of the issue. We use issue weight to let contributors +know how difficult the issue is. Additionally: + +- We advertise [~"Accepting Merge Requests" issues with weight < 5][up-for-grabs] + as suitable for people that have never contributed to GitLab before on the + [Up For Grabs campaign](http://up-for-grabs.net) +- We encourage people that have never contributed to any open source project to + look for [~"Accepting Merge Requests" issues with a weight of 1][firt-timers] + +[up-for-grabs]: https://gitlab.com/gitlab-org/gitlab-pages/issues?label_name=Accepting+Merge+Requests&scope=all&sort=weight_asc&state=opened +[firt-timers]: https://gitlab.com/gitlab-org/gitlab-pages/issues?label_name%5B%5D=Accepting+Merge+Requests&scope=all&sort=upvotes_desc&state=opened&weight=1 + +## Implement design & UI elements + +Please see the [UX Guide for GitLab]. + +## Issue tracker + +To get support for your particular problem please use the +[getting help channels](https://about.gitlab.com/getting-help/). + +The [GitLab Pages issue tracker on GitLab.com][pages-tracker] is for bugs concerning +the latest GitLab Pages release and [feature proposals](#feature-proposals). + +When submitting an issue please conform to the issue submission guidelines +listed below. Not all issues will be addressed and your issue is more likely to +be addressed if you submit a merge request which partially or fully solves +the issue. + +If you're unsure where to post, post to the [mailing list][google-group] or +[Stack Overflow][stackoverflow] first. There are a lot of helpful GitLab users +there who may be able to help you quickly. If your particular issue turns out +to be a bug, it will find its way from there. + +If it happens that you know the solution to an existing bug, please first +open the issue in order to keep track of it and then open the relevant merge +request that potentially fixes it. + +### Issue triaging + +Our issue triage policies are [described in our handbook]. You are very welcome +to help the GitLab team triage issues. We also organize [issue bash events] once +every quarter. + +The most important thing is making sure valid issues receive feedback from the +development team. Therefore the priority is mentioning developers that can help +on those issues. Please select someone with relevant experience from the +[GitLab team][team]. If there is nobody mentioned with that expertise look in +the commit history for the affected files to find someone. + +[described in our handbook]: https://about.gitlab.com/handbook/engineering/issues/issue-triage-policies/ +[issue bash events]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17815 + +### Feature proposals + +To create a feature proposal for GitLab Pages, open an issue on the +[GitLab Pages issue tracker][pages-tracker]. + +In order to help track the feature proposals, we have created a +[`feature proposal`][fpl] label. For the time being, users that are not members +of the project cannot add labels. You can instead ask one of the [core team] +members to add the label `feature proposal` to the issue or add the following +code snippet right after your description in a new line: `~"feature proposal"`. + +Please keep feature proposals as small and simple as possible, complex ones +might be edited to make them small and simple. + +For changes in the interface, it can be helpful to create a mockup first. +If you want to create something yourself, consider opening an issue first to +discuss whether it is interesting to include this in GitLab. + +### Issue tracker guidelines + +**[Search the issue tracker][pages-tracker]** for similar entries before +submitting your own, there's a good chance somebody else had the same issue or +feature proposal. Show your support with an award emoji and/or join the +discussion. + +### Issue weight + +Issue weight allows us to get an idea of the amount of work required to solve +one or multiple issues. This makes it possible to schedule work more accurately. + +You are encouraged to set the weight of any issue. Following the guidelines +below will make it easy to manage this, without unnecessary overhead. + +1. Set weight for any issue at the earliest possible convenience +1. If you don't agree with a set weight, discuss with other developers until +consensus is reached about the weight +1. Issue weights are an abstract measurement of complexity of the issue. Do not +relate issue weight directly to time. This is called [anchoring](https://en.wikipedia.org/wiki/Anchoring) +and something you want to avoid. +1. Something that has a weight of 1 (or no weight) is really small and simple. +Something that is 9 is rewriting a large fundamental part of GitLab, +which might lead to many hard problems to solve. Changing some text in GitLab +is probably 1, adding a new Git Hook maybe 4 or 5, big features 7-9. +1. If something is very large, it should probably be split up in multiple +issues or chunks. You can simply not set the weight of a parent issue and set +weights to children issues. + +### Regression issues + +Every monthly release has a corresponding issue on the CE issue tracker to keep +track of functionality broken by that release and any fixes that need to be +included in a patch release (see [8.3 Regressions] as an example). + +As outlined in the issue description, the intended workflow is to post one note +with a reference to an issue describing the regression, and then to update that +note with a reference to the merge request that fixes it as it becomes available. + +If you're a contributor who doesn't have the required permissions to update +other users' notes, please post a new note with a reference to both the issue +and the merge request. + +The release manager will [update the notes] in the regression issue as fixes are +addressed. + +[8.3 Regressions]: https://gitlab.com/gitlab-org/gitlab-ce/issues/4127 +[update the notes]: https://gitlab.com/gitlab-org/release-tools/blob/master/doc/pro-tips.md#update-the-regression-issue + +### Technical debt + +In order to track things that can be improved in GitLab's codebase, we created +the ~"technical debt" label in [GitLab's issue tracker][pages-tracker]. + +This label should be added to issues that describe things that can be improved, +shortcuts that have been taken, code that needs refactoring, features that need +additional attention, and all other things that have been left behind due to +high velocity of development. + +Everyone can create an issue, though you may need to ask for adding a specific +label, if you do not have permissions to do it by yourself. Additional labels +can be combined with the `technical debt` label, to make it easier to schedule +the improvements for a release. + +Issues tagged with the `technical debt` label have the same priority like issues +that describe a new feature to be introduced in GitLab, and should be scheduled +for a release by the appropriate person. + +Make sure to mention the merge request that the `technical debt` issue is +associated with in the description of the issue. + +### Stewardship + +For issues related to the open source stewardship of GitLab, +there is the ~"stewardship" label. + +This label is to be used for issues in which the stewardship of GitLab +is a topic of discussion. For instance if GitLab Inc. is planning to remove +features from GitLab CE to make exclusive in GitLab EE, related issues +would be labelled with ~"stewardship". + +A recent example of this was the issue for +[bringing the time tracking API to GitLab CE][time-tracking-issue]. + +[time-tracking-issue]: https://gitlab.com/gitlab-org/gitlab-ce/issues/25517#note_20019084 + +## Merge requests + +We welcome merge requests with fixes and improvements to GitLab code, tests, +and/or documentation. The issues that are specifically suitable for +community contributions are listed with the label +[`Accepting Merge Requests` on our issue tracker for Pages][accepting-mrs], +but you are free to contribute to any other issue you want. + +Please note that if an issue is marked for the current milestone either before +or while you are working on it, a team member may take over the merge request +in order to ensure the work is finished before the release date. + +If you want to add a new feature that is not labeled it is best to first create +a feedback issue (if there isn't one already) and leave a comment asking for it +to be marked as `Accepting Merge Requests`. Please include screenshots or +wireframes if the feature will also change the UI. + +Merge requests should be opened at [GitLab.com][pages-mr-tracker]. + +If you are new to GitLab development (or web development in general), see the +[I want to contribute!](#i-want-to-contribute) section to get you started with +some potentially easy issues. + +To start with GitLab development download the [GitLab Development Kit][gdk] and +see the [Development section](https://docs.gitlab.com/ee/development/README.html) for some guidelines. + +### Merge request guidelines + +If you can, please submit a merge request with the fix or improvements +including tests. If you don't know how to fix the issue but can write a test +that exposes the issue we will accept that as well. In general bug fixes that +include a regression test are merged quickly while new features without proper +tests are least likely to receive timely feedback. The workflow to make a merge +request is as follows: + +1. Fork the project into your personal space on GitLab.com +1. Create a feature branch, branch away from `master` +1. Write [tests](https://gitlab.com/gitlab-org/gitlab-development-kit#running-the-tests) and code +1. If you are writing documentation, make sure to follow the + [documentation styleguide][doc-styleguide] +1. If you have multiple commits please combine them into a few logically + organized commits by [squashing them][git-squash] +1. Push the commit(s) to your fork +1. Submit a merge request (MR) to the `master` branch + 1. You don't have to select any approvers, but you can if you really want + specific people to approve your merge request +1. The MR title should describe the change you want to make +1. The MR description should give a motive for your change and the method you + used to achieve it. + 1. Mention the issue(s) your merge request solves, using the `Solves #XXX` or + `Closes #XXX` syntax to auto-close the issue(s) once the merge request will + be merged. +1. If you're allowed to, set a relevant milestone and labels +1. Be prepared to answer questions and incorporate feedback even if requests + for this arrive weeks or months after your MR submission + 1. If a discussion has been addressed, select the "Resolve discussion" button + beneath it to mark it resolved. +1. If your MR touches code that executes shell commands, reads or opens files or + handles paths to files on disk, make sure it adheres to the + [shell command guidelines](https://docs.gitlab.com/ee/development/shell_commands.html) +1. If your code creates new files on disk please read the + [shared files guidelines](https://docs.gitlab.com/ee/development/shared_files.html). +1. When writing commit messages please follow + [these](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) + [guidelines](http://chris.beams.io/posts/git-commit/). + +Please keep the change in a single MR **as small as possible**. If you want to +contribute a large feature think very hard what the minimum viable change is. +Can you split the functionality? Can you only submit the backend/API code? Can +you start with a very simple UI? Can you do part of the refactor? The increased +reviewability of small MRs that leads to higher code quality is more important +to us than having a minimal commit log. The smaller an MR is the more likely it +is it will be merged (quickly). After that you can send more MRs to enhance it. +The ['How to get faster PR reviews' document of Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/faster_reviews.md) also has some great points regarding this. + +For examples of feedback on merge requests please look at already +[closed merge requests][closed-merge-requests]. If you would like quick feedback +on your merge request feel free to mention someone from the [core team] or one +of the [Merge request coaches][team]. +Please ensure that your merge request meets the contribution acceptance criteria. + +When having your code reviewed and when reviewing merge requests please take the +[code review guidelines](https://docs.gitlab.com/ee/development/code_review.html) into account. + +### Contribution acceptance criteria + +1. The change is as small as possible +1. Include proper tests and make all tests pass (unless it contains a test + exposing a bug in existing code). Every new class should have corresponding + unit tests, even if the class is exercised at a higher level, such as a feature test. +1. If you suspect a failing CI build is unrelated to your contribution, you may + try and restart the failing CI job or ask a developer to fix the + aforementioned failing test +1. Your MR initially contains a single commit (please use `git rebase -i` to + squash commits) +1. Your changes can merge without problems (if not please rebase if you're the + only one working on your feature branch, otherwise, merge `master`) +1. Does not break any existing functionality +1. Fixes one specific issue or implements one specific feature (do not combine + things, send separate merge requests if needed) +1. Keeps the GitLab code base clean and well structured +1. Contains functionality we think other users will benefit from too +1. Doesn't add configuration options or settings options since they complicate + making and testing future changes +1. Changes do not adversely degrade performance. + - Avoid repeated polling of endpoints that require a significant amount of overhead + - Avoid repeated access of filesystem +1. Changes after submitting the merge request should be in separate commits + (no squashing). +1. It conforms to the [style guides](#style-guides) and the following: + - If your change touches a line that does not follow the style, modify the + entire line to follow it. This prevents linting tools from generating warnings. + - Don't touch neighbouring lines. As an exception, automatic mass + refactoring modifications may leave style non-compliant. +1. If the merge request adds any new libraries (gems, JavaScript libraries, + etc.), they should conform to our [Licensing guidelines][license-finder-doc]. + +## Definition of done + +If you contribute to GitLab please know that changes involve more than just +code. We have the following [definition of done][definition-of-done]. Please ensure you support +the feature you contribute through all of these steps. + +1. Description explaining the relevancy (see following item) +1. Working and clean code that is commented where needed +1. [Unit and system tests][testing] that pass on the CI server +1. Performance/scalability implications have been considered, addressed, and tested +1. [Documented][doc-styleguide] in the `/doc` directory +1. Reviewed and any concerns are addressed +1. Merged by a project maintainer +1. Added to the release blog article, if relevant +1. Added to [the website](https://gitlab.com/gitlab-com/www-gitlab-com/), if relevant +1. Community questions answered +1. Answers to questions radiated (in docs/wiki/support etc.) + +If you add a dependency in GitLab (such as an operating system package) please +consider updating the following and note the applicability of each in your +merge request: + +1. Note the addition in the release blog post (create one if it doesn't exist yet) https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/ +1. Upgrade guide, for example https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/update/7.5-to-7.6.md +1. Upgrader https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/update/upgrader.md#2-run-gitlab-upgrade-tool +1. Installation guide https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/install/installation.md#1-packages-dependencies +1. GitLab Development Kit https://gitlab.com/gitlab-org/gitlab-development-kit +1. Test suite https://gitlab.com/gitlab-org/gitlab-ce/blob/master/scripts/prepare_build.sh +1. Omnibus package creator https://gitlab.com/gitlab-org/omnibus-gitlab + +## Style guides + +1. [Ruby](https://github.com/bbatsov/ruby-style-guide). + Important sections include [Source Code Layout][rss-source] and + [Naming][rss-naming]. Use: + - multi-line method chaining style **Option A**: dot `.` on the second line + - string literal quoting style **Option A**: single quoted by default +1. [Rails](https://github.com/bbatsov/rails-style-guide) +1. [Newlines styleguide][newlines-styleguide] +1. [Testing][testing] +1. [JavaScript styleguide][js-styleguide] +1. [SCSS styleguide][scss-styleguide] +1. [Shell commands](doc/development/shell_commands.md) created by GitLab + contributors to enhance security +1. [Database Migrations](doc/development/migration_style_guide.md) +1. [Markdown](http://www.cirosantilli.com/markdown-styleguide) +1. [Documentation styleguide][doc-styleguide] +1. Interface text should be written subjectively instead of objectively. It + should be the GitLab core team addressing a person. It should be written in + present time and never use past tense (has been/was). For example instead + of _prohibited this user from being saved due to the following errors:_ the + text should be _sorry, we could not create your account because:_ + +This is also the style used by linting tools such as +[RuboCop](https://github.com/bbatsov/rubocop), +[PullReview](https://www.pullreview.com/) and [Hound CI](https://houndci.com). + +## Code of conduct + +As contributors and maintainers of this project, we pledge to respect all +people who contribute through reporting issues, posting feature requests, +updating documentation, submitting pull requests or patches, and other +activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, or religion. + +Examples of unacceptable behavior by participants include the use of sexual +language or imagery, derogatory comments or personal attacks, trolling, public +or private harassment, insults, or other unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. Project maintainers who do not +follow the Code of Conduct may be removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior can be +reported by emailing `contact@gitlab.com`. + +This Code of Conduct is adapted from the [Contributor Covenant][contributor-covenant], version 1.1.0, +available at [http://contributor-covenant.org/version/1/1/0/](http://contributor-covenant.org/version/1/1/0/). + +[core team]: https://about.gitlab.com/core-team/ +[team]: https://about.gitlab.com/team/ +[getting-help]: https://about.gitlab.com/getting-help/ +[codetriage]: http://www.codetriage.com/gitlabhq/gitlabhq +[accepting-mrs-weight]: https://gitlab.com/gitlab-org/gitlab-pages/issues?assignee_id=0&label_name[]=Accepting%20Merge%20Requests&sort=weight_asc +[pages-tracker]: https://gitlab.com/gitlab-org/gitlab-pages/issues +[google-group]: https://groups.google.com/forum/#!forum/gitlabhq +[stackoverflow]: https://stackoverflow.com/questions/tagged/gitlab +[fpl]: https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name=feature+proposal +[accepting-mrs]: https://gitlab.com/gitlab-org/gitlab-pages/issues?label_name=Accepting+Merge+Requests +[pages-mr-tracker]: https://gitlab.com/gitlab-org/gitlab-pages/merge_requests +[gdk]: https://gitlab.com/gitlab-org/gitlab-development-kit +[git-squash]: https://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[closed-merge-requests]: https://gitlab.com/gitlab-org/gitlab-pages/merge_requests?assignee_id=&label_name=&milestone_id=&scope=&sort=&state=closed +[definition-of-done]: http://guide.agilealliance.org/guide/definition-of-done.html +[contributor-covenant]: http://contributor-covenant.org +[rss-source]: https://github.com/bbatsov/ruby-style-guide/blob/master/README.md#source-code-layout +[rss-naming]: https://github.com/bbatsov/ruby-style-guide/blob/master/README.md#naming +[doc-styleguide]: https://docs.gitlab.com/ee/development/doc_styleguide.html "Documentation styleguide" +[js-styleguide]: https://docs.gitlab.com/ee/development/fe_guide/style_guide_js.html "JavaScript styleguide" +[scss-styleguide]: https://docs.gitlab.com/ee/development/fe_guide/style_guide_scss.html "SCSS styleguide" +[newlines-styleguide]: https://docs.gitlab.com/ee/development/newlines_styleguide.html "Newlines styleguide" +[UX Guide for GitLab]: http://docs.gitlab.com/ce/development/ux_guide/ +[license-finder-doc]: https://docs.gitlab.com/ee/development/licensing.html +[GitLab Inc engineering workflow]: https://about.gitlab.com/handbook/engineering/workflow/#labelling-issues +[polling-etag]: https://docs.gitlab.com/ce/development/polling.html +[testing]: https://docs.gitlab.com/ee/development/testing.html + +[^1]: Please note that specs other than JavaScript specs are considered backend + code. diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..a90ea9395 --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright GitLab B.V. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..6dd8662b6 --- /dev/null +++ b/Makefile @@ -0,0 +1,34 @@ +IMPORT_PATH := gitlab.com/gitlab-org/gitlab-pages +V := 1 + +# Space separated patterns of packages to skip in list, test, fmt. +IGNORED_PACKAGES := /vendor/ /internal/httputil/ /internal/mocks/ + +# GitLab Pages is statically compiled without CGO to help it in chroot mode +export CGO_ENABLED := 0 + +include Makefile.build.mk +include Makefile.util.mk +include Makefile.internal.mk + +# Based on https://github.com/cloudflare/hellogopher - v1.1 - MIT License +# +# Copyright (c) 2017 Cloudflare +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff --git a/Makefile.build.mk b/Makefile.build.mk new file mode 100644 index 000000000..2656e62e5 --- /dev/null +++ b/Makefile.build.mk @@ -0,0 +1,26 @@ +GOLANGCI_LINT_VERSION := v1.27.0 # version used by $GOLANGCI_LINT_IMAGE + +GO_BUILD_TAGS := continuous_profiler_stackdriver + +.PHONY: all setup generate-mocks build clean + +all: gitlab-pages + +setup: clean .GOPATH/.ok + go get github.com/wadey/gocovmerge@v0.0.0-20160331181800-b5bfa59ec0ad + go get github.com/golang/mock/mockgen@v1.3.1 + go get github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION) + go get github.com/jstemmer/go-junit-report + +generate-mocks: .GOPATH/.ok + $Q bin/mockgen -source=internal/interface.go -destination=internal/mocks/mocks.go -package=mocks + +build: .GOPATH/.ok + $Q GOBIN=$(CURDIR)/bin go install $(if $V,-v) $(VERSION_FLAGS) -tags "${GO_BUILD_TAGS}" -buildmode exe $(IMPORT_PATH) + +clean: + $Q rm -rf bin .GOPATH gitlab-pages + +gitlab-pages: build + $Q cp -f ./bin/gitlab-pages . + diff --git a/Makefile.internal.mk b/Makefile.internal.mk new file mode 100644 index 000000000..54e7f7da3 --- /dev/null +++ b/Makefile.internal.mk @@ -0,0 +1,48 @@ +REVISION := $(shell git rev-parse --short HEAD || echo unknown) +LAST_TAG := $(shell git describe --tags --abbrev=0) +COMMITS := $(shell echo `git log --oneline $(LAST_TAG)..HEAD | wc -l`) +VERSION := $(shell cat VERSION) + +ifneq (v$(VERSION),$(LAST_TAG)) + VERSION := $(shell echo $(VERSION)~beta.$(COMMITS).g$(REVISION)) +endif + +VERSION_FLAGS := -ldflags='-X "main.VERSION=$(VERSION)" -X "main.REVISION=$(REVISION)"' + +# cd into the GOPATH to workaround ./... not following symlinks +_allpackages = $(shell ( cd $(CURDIR)/.GOPATH/src/$(IMPORT_PATH) && \ + GOPATH=$(CURDIR)/.GOPATH go list ./... 2>&1 1>&3 | \ + grep -v -e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES)) 1>&2 ) 3>&1 | \ + grep -v -e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES))) + +_allfiles = $(shell cd $(CURDIR)/.GOPATH/src/$(IMPORT_PATH) && find . -iname '*.go' | grep -v "^\./\." | grep -v -e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES)) ) + +# memoize allpackages, so that it's executed only once and only if used +allpackages = $(if $(__allpackages),,$(eval __allpackages := $$(_allpackages)))$(__allpackages) +allfiles = $(if $(__allfiles),,$(eval __allfiles := $$(_allfiles)))$(__allfiles) + +export GOPATH := $(CURDIR)/.GOPATH +unexport GOBIN + +Q := $(if $V,,@) + +.GOPATH/.ok: + $Q mkdir -p "$(dir .GOPATH/src/$(IMPORT_PATH))" + $Q ln -s ../../../.. ".GOPATH/src/$(IMPORT_PATH)" + $Q mkdir -p .GOPATH/test .GOPATH/cover + $Q mkdir -p bin + $Q ln -s ../bin .GOPATH/bin + $Q touch $@ + +.PHONY: bin/gocovmerge bin/golangci-lint +bin/gocovmerge: .GOPATH/.ok + @test -x $@ || \ + { echo "Vendored gocovmerge not found, try running 'make setup'..."; exit 1; } + +bin/golangci-lint: .GOPATH/.ok + @test -x $@ || \ + { echo "Vendored golangci-lint not found, try running 'make setup'..."; exit 1; } + +bin/go-junit-report: .GOPATH/.ok + @test -x $@ || \ + { echo "Vendored go-junit-report not found, try running 'make setup'..."; exit 1; } diff --git a/Makefile.util.mk b/Makefile.util.mk new file mode 100644 index 000000000..4f190ea45 --- /dev/null +++ b/Makefile.util.mk @@ -0,0 +1,61 @@ +GOLANGCI_LINT_IMAGE := golangci/golangci-lint:$(GOLANGCI_LINT_VERSION) + +.PHONY: lint test race acceptance bench cover list deps-check deps-download + +OUT_FORMAT ?= colored-line-number +LINT_FLAGS ?= $(if $V,-v) +REPORT_FILE ?= + +lint: .GOPATH/.ok bin/golangci-lint + $Q ./bin/golangci-lint run ./... --out-format $(OUT_FORMAT) $(LINT_FLAGS) | tee ${REPORT_FILE} + +test: .GOPATH/.ok gitlab-pages + rm tests.out || true + go test $(if $V,-v) $(allpackages) 2>&1 | tee tests.out + +race: .GOPATH/.ok gitlab-pages + CGO_ENABLED=1 go test -race $(if $V,-v) $(allpackages) + +acceptance: .GOPATH/.ok gitlab-pages + go test $(if $V,-v) ./test/acceptance 2>&1 | tee tests.out + +bench: .GOPATH/.ok gitlab-pages + go test -bench=. -run=^$$ $(allpackages) + +# The acceptance tests cannot count for coverage +cover: bin/gocovmerge .GOPATH/.ok gitlab-pages + @echo "NOTE: make cover does not exit 1 on failure, don't use it to check for tests success!" + $Q rm -f .GOPATH/cover/*.out .GOPATH/cover/all.merged + $(if $V,@echo "-- go test -coverpkg=./... -coverprofile=.GOPATH/cover/... ./...") + @for MOD in $(allpackages); do \ + go test \ + -short \ + -coverpkg=`echo $(allpackages)|tr " " ","` \ + -coverprofile=.GOPATH/cover/unit-`echo $$MOD|tr "/" "_"`.out \ + $$MOD 2>&1 | grep -v "no packages being tested depend on"; \ + done + $Q ./bin/gocovmerge .GOPATH/cover/*.out > .GOPATH/cover/all.merged + $Q go tool cover -html .GOPATH/cover/all.merged -o coverage.html + @echo "" + @echo "=====> Total test coverage: <=====" + @echo "" + $Q go tool cover -func .GOPATH/cover/all.merged + +list: .GOPATH/.ok + @echo $(allpackages) + +deps-check: .GOPATH/.ok + go mod tidy + @if git diff --color=always --exit-code -- go.mod go.sum; then \ + echo "go.mod and go.sum are ok"; \ + else \ + echo ""; \ + echo "go.mod and go.sum are modified, please commit them";\ + exit 1; \ + fi; + +deps-download: .GOPATH/.ok + go mod download + +junit-report: .GOPATH/.ok bin/go-junit-report + cat tests.out | ./bin/go-junit-report -set-exit-code > junit-test-report.xml diff --git a/PROCESS.md b/PROCESS.md new file mode 100644 index 000000000..37708dd8e --- /dev/null +++ b/PROCESS.md @@ -0,0 +1,80 @@ +# GitLab Pages processes + +## Reviewing + +A contribution to GitLab Pages should generally be reviewed by at least two +people - one acting as initial reviewer, the other as a maintainer. Trivial +fixes may go straight to a maintainer. People should not merge their own +contributions. + +## Versioning + +GitLab Pages follows [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +The `X-Y-stable` branches and `master` should never have their history +rewritten. Tags should never be deleted. + +## Releasing + +Pages is tightly coupled to GitLab itself. To align with GitLab's +[development month](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/PROCESS.md), +new versions of GitLab Pages are released before the 7th of each month (assuming +any changes have been made). +To do so create [release issue](https://gitlab.com/gitlab-org/gitlab-pages/issues/new?issuable_template=release) and follow the instructions. + +## Stable releases + +Typically, release tags point to a specific commit on the **master** branch. As +the Pages repository experiences a low rate of change, this allows most releases +to happen in conformance with semver, without the overhead of multiple +[stable branches](https://docs.gitlab.com/ee/workflow/gitlab_flow.html). + +A bug fix may required in a particular version after the **master** branch has +moved on. This may happen between the 7th and 22nd of a release month, relating +to the **previous** release, or at any time for a security fix. + +GitLab may backport security fixes for up to three releases, which may +correspond to three separate minor versions of GitLab Pages - and so three new +versions to release. See [Security releases](#Security releases) for the details. + +In either case, the fix should first be developed against the master branch. +Once ready, the fix should be merged to master, where it will be +included in the next major or minor release as usual. + +The fix may be cherry-picked into each relevant stable branch, and a new patch +release made in the same way as defined above. + +When updating `GITLAB_PAGES_VERSION` in the [GitLab](https://gitlab.com/gitlab-org/gitlab) +repository, you should target the relevant `X-Y-stable` branches there. In +general, these branches should only ever have the patch version of GitLab pages +incremented. + +## Security releases + +Pages security releases are built on top of the [GitLab Security Release process]. Engineers follow +the same steps stated on the [Security Developer] guidelines with some adjustments: + +- Apart from the [security merge requests] created on [GitLab Security], merge requests will also be created on [GitLab Pages Security]: + - Merge request targeting `master` is prepared with the GitLab Pages security fix. + - Backports are prepared for the last releases corresponding to last 3 GitLab releases. + - Security merge requests are required to use the [merge request security template]. + - **It's important for these merge requests to not be associated with the Security Implementation Issue created on [GitLab Security], otherwise the security issue won't be considered by [Release Tools].** +- Security merge requests created on [GitLab Security] will bump the `GITLAB_PAGES_VERSION`. +- Once the merge requests on [GitLab Pages Security] are approved: + - Maintainers of GitLab Pages will merge the security merge requests **targeting stable branches** and create a new tag for these branches. + - Merge requests on GitLab Security are assigned to `@gitlab-release-tools-bot` so they can be automatically processed by [Release Tools]. + +- After the security release is published, maintainers of GitLab Pages: + - Merge the merge requests targeting `master`. + - Branches and tags across [GitLab Pages Security] and [GitLab Pages] are synced: + - `Master` and stable branches. + - Affected `v*.*.*` tags. + +[GitLab Security Release process]: https://gitlab.com/gitlab-org/release/docs/blob/master/general/security/process.md +[Security Developer]: https://gitlab.com/gitlab-org/release/docs/blob/master/general/security/developer.md +[GitLab Pages Security]: https://gitlab.com/gitlab-org/security/gitlab-pages/ +[security merge requests]: https://gitlab.com/gitlab-org/release/docs/blob/master/general/security/developer.md#create-merge-requests +[GitLab Security]: https://gitlab.com/gitlab-org/security/gitlab/ +[merge request security template]: https://gitlab.com/gitlab-org/gitlab-pages/-/blob/master/.gitlab/merge_request_templates/Security%20Release.md +[Release Tools]: https://gitlab.com/gitlab-org/release-tools/ +[GitLab Pages]: https://gitlab.com/gitlab-org/gitlab-pages diff --git a/README.md b/README.md new file mode 100644 index 000000000..9765c694e --- /dev/null +++ b/README.md @@ -0,0 +1,331 @@ +## GitLab Pages Daemon + +[![build status](https://gitlab.com/gitlab-org/gitlab-pages/badges/master/pipeline.svg)](https://gitlab.com/gitlab-org/gitlab-pages/commits/master) +[![coverage report](https://gitlab.com/gitlab-org/gitlab-pages/badges/master/coverage.svg)](https://gitlab.com/gitlab-org/gitlab-pages/commits/master) + +This is a simple HTTP server written in Go, made to serve GitLab Pages with +CNAMEs and SNI using HTTP/HTTP2. The minimum supported Go version is v1.13. + +This is made to work in small to medium-scale environments. Start-up time scales +with the number of projects being served, so the daemon is currently unsuitable +for very large-scale environments. + +### How it generates routes + +1. It reads the `pages-root` directory to list all groups. +2. It looks for `config.json` files in `pages-root/group/project` directories, + reads them and creates mapping for custom domains and certificates. +3. It generates virtual hosts from these data. +4. Periodically (every second) it checks the `pages-root/.update` file and reads + its content to verify if there was an update. + +To reload the configuration, fill the `pages-root/.update` file with random +content. The reload will be done asynchronously, and it will not interrupt the +current requests. + +### How it serves content + +1. When client initiates the TLS connection, the GitLab-Pages daemon looks in + the generated configuration for virtual hosts. If present, it uses the TLS + key and certificate in `config.json`, otherwise it falls back to the global + configuration. +2. When client connects to an HTTP port, the GitLab-Pages daemon looks in the + generated configuration for a matching virtual host. +3. The URL.Path is split into `//` and the daemon tries to + load: `pages-root/group/project/public/subpath`. +4. If the file is not found, it will try to load `pages-root/group//public/`. +5. If requested path is a directory, the `index.html` will be served. +6. If `.../path.gz` exists, it will be served instead of the main file, with + a `Content-Encoding: gzip` header. This allows compressed versions of the + files to be precalculated, saving CPU time and network bandwidth. + +### HTTPS only domains + +Users have the option to enable "HTTPS only pages" on a per-project basis. +This option is also enabled by default for all newly-created projects. + +When the option is enabled, a project's `config.json` will contain an +`https_only` attribute. + +When the `https_only` attribute is found in the root context, any project pages +served over HTTP via the group domain (i.e. `username.gitlab.io`) will be 301 +redirected to HTTPS. + +When the attribute is found in a custom domain's configuration, any HTTP +requests to this domain will likewise be redirected. + +If the attribute's value is false, or the attribute is missing, then +the content will be served to the client over HTTP. + +### How it should be run? + +Ideally the GitLab Pages should run without any load balancer in front of it. + +If a load balancer is required, the HTTP can be served in HTTP mode. For HTTPS +traffic, the load balancer should be run in TCP mode. If the load balancer is +run in SSL-offloading mode, custom TLS certificates will not work. + +### How to run it + +Example: +``` +$ make +$ ./gitlab-pages -listen-http ":8090" -pages-root path/to/gitlab/shared/pages -pages-domain example.com +``` + +To run on HTTPS ensure you have a root certificate key pair available + +``` +$ make +$ ./gitlab-pages -listen-https ":9090" -root-cert=path/to/example.com.crt -root-key=path/to/example.com.key -pages-root path/to/gitlab/shared/pages -pages-domain example.com +``` + +### Getting started with development + +See [doc/development.md](doc/development.md) + + +### Run daemon **in secure mode** + +When compiled with `CGO_ENABLED=0` (which is the default), `gitlab-pages` is a +static binary and so can be run in chroot with dropped privileges. + +To enter this mode, run `gitlab-pages` as the root user and pass it the +`-daemon-uid` and `-daemon-gid` arguments to specify the user you want it to run +as. + +The daemon starts listening on ports and reads certificates as root, then +re-executes itself as the specified user. When re-executing it creates a chroot jail +containing a copy of its own binary, `/etc/hosts`, `/etc/resolv.conf`, and a bind mount of `pages-root`. + +When `-artifacts-server` points to an HTTPS URL we also need a list of certificates for +the trusted Certification Authorities to copy inside the jail. +A file containing such list can be specified using `SSL_CERT_FILE` environment variable. +(`SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt` on Debian) + +This makes it possible to listen on privileged ports and makes it harder for the +process to read files outside of `pages-root`. + +Example: +``` +$ make +$ sudo ./gitlab-pages -listen-http ":80" -pages-root path/to/gitlab/shared/pages -pages-domain example.com -daemon-uid 1000 -daemon-gid 1000 +``` + +#### Caveats + +The `/etc/hosts` and `/etc/resolv.conf` files, and any file pointed to by the `SSL_CERT_FILE` +environment variable, will be copied into the jail. As a result, changes to +these files will not be reflected in Pages until it's restarted. + +Bind mounts are unavailable on a range of non-Linux systems. Some of these +systems (e.g., BSD) offer native "jail" functionality. It is recommended to set +up an externally-managed jail and run the Pages daemon within it as an ordinary +user if available. + +A less-functional (but just as secure) operation mode is provided via the +`-daemon-inplace-chroot` command-line option. If passed, Pages will daemonize +as usual, but chroot directly to the `-pages-root` directory instead of building +a complete jail in the system temporary directory. There are some known issues +with this mode, such as: + +* Pages service will not be able to resolve the domain name of the auth server and the artifacts server due to missing `/etc/resolv.conf` at the chroot directory. As a workaround, you can manually copy the file to the pages root directory, however, it might cause a conflict with an existing pages data. As a result of DNS not working: + * [GitLab access control](#gitlab-access-control) might not work + * [Online view of HTML artifacts](https://about.gitlab.com/2017/10/22/gitlab-10-1-released/#online-view-of-html-artifacts) may not work. You can disable it and fall back to downloading artifacts by setting `artifacts_server` to `false` in `gitlab.yml` for your GitLab instance: + ```yml + ## GitLab Pages + pages: + enabled: true + artifacts_server: false + ``` +* TLS operation (on some systems) will not work + +The default secure mode will also fail for certain Linux-based configurations. +Known cases include: + +* The Pages daemon is running inside an unprivileged container + * Bind mount functionality requires the `CAP_SYS_ADMIN` privilege + * This is only available to containers run in privileged mode +* The system temporary directory is mounted `noexec` or `nodev` + * The jail is created in `$TMPDIR`. + * Character device files are created within the jail + * A copy of the gitlab-pages executable is run from within the bind mount +* AppArmor/SELinux is enabled + * These systems disallow bind-mounting in certain configurations + +In these cases, workarounds are similar to those documented for non-Linux +systems - use an external jailing technology, or fall back to the pre-v0.8.0 +behaviour using `-daemon-inplace-chroot`. + +On Linux, Docker and other containerization systems can be used to build a jail +within which the Pages daemon can safely run with secure mode disabled. However, +this configuration **is not secure** if simply using the default +`gitlab/gitlab-ce` and `gitlab-gitlab-ee` Docker containers! + +### Listen on multiple ports + +Each of the `listen-http`, `listen-https` and `listen-proxy` arguments can be +provided multiple times. Gitlab Pages will accept connections to them all. + +Example: +``` +$ make +$ ./gitlab-pages -listen-http "10.0.0.1:8080" -listen-https "[fd00::1]:8080" -pages-root path/to/gitlab/shared/pages -pages-domain example.com +``` + +This is most useful in dual-stack environments (IPv4+IPv6) where both Gitlab +Pages and another HTTP server have to co-exist on the same server. + + +#### Listening behind a reverse proxy + +When `listen-proxy` is used please make sure that your reverse proxy solution is configured to strip the [RFC7239 Forwarded headers](https://tools.ietf.org/html/rfc7239). + +We use `gorilla/handlers.ProxyHeaders` middleware. For more information please review the [gorilla/handlers#ProxyHeaders](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) documentation. + +> NOTE: This middleware should only be used when behind a reverse proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are configured not to) strip these headers from client requests, or where these headers are accepted "as is" from a remote client (e.g. when Go is not behind a proxy), can manifest as a vulnerability if your application uses these headers for validating the 'trustworthiness' of a request. + +### PROXY protocol for HTTPS + +The above `listen-proxy` option only works for plaintext HTTP, where the reverse +proxy was already able to parse the incoming HTTP traffic and inject a header for +the remote client IP. + +This does not work for HTTPS which is generally proxied at the TCP level. In +order to propagate the remote client IP in this case, you can use the +[PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). +This is supported by HAProxy and some third party services such as Cloudflare. + +To configure PROXY protocol support, run `gitlab-pages` with the +`listen-https-proxyv2` flag. + +If you are using HAProxy as your TCP load balancer, you can configure the backend +with the `send-proxy-v2` option, like so: + +``` +frontend fe + bind 127.0.0.1:12340 + mode tcp + default_backend be + +backend be + mode tcp + server app1 127.0.0.1:1234 send-proxy-v2 +``` + +### GitLab access control + +GitLab access control is configured with properties `auth-client-id`, `auth-client-secret`, `auth-redirect-uri`, `auth-server` and `auth-secret`. Client ID, secret and redirect uri are configured in the GitLab and should match. `auth-server` points to a GitLab instance used for authentication. `auth-redirect-uri` should be `http(s)://pages-domain/auth`. Note that if the pages-domain is not handled by GitLab pages, then the `auth-redirect-uri` should use some reserved namespace prefix (such as `http(s)://projects.pages-domain/auth`). Using HTTPS is _strongly_ encouraged. `auth-secret` is used to encrypt the session cookie, and it should be strong enough. + +Example: +``` +$ make +$ ./gitlab-pages -listen-http "10.0.0.1:8080" -listen-https "[fd00::1]:8080" -pages-root path/to/gitlab/shared/pages -pages-domain example.com -auth-client-id -auth-client-secret -auth-redirect-uri https://projects.example.com/auth -auth-secret something-very-secret -auth-server https://gitlab.com +``` + +> NOTE: GitLab access control might not work with `-daemon-inplace-chroot` option. Please take a look at [the caveat section](#caveats) above. + +#### How it works + +1. GitLab pages looks for `access_control` and `id` fields in `config.json` files + in `pages-root/group/project` directories. +2. For projects that have `access_control` set to `true` pages will require user to authenticate. +3. When user accesses a project that requires authentication, user will be redirected + to GitLab to log in and grant access for GitLab pages. +4. When user grants access to GitLab pages, pages will use the OAuth2 `code` to get an access + token which is stored in the user session cookie. +5. Pages will now check user's access to a project with a access token stored in the user + session cookie. This is done via a request to GitLab API with the user's access token. +6. If token is invalidated, user will be redirected again to GitLab to authorize pages again. + +### Enable Prometheus Metrics + +For monitoring purposes, you can pass the `-metrics-address` flag when starting. +This will expose general metrics about the Go runtime and pages application for +[Prometheus](https://prometheus.io/) to scrape. + +Example: +``` +$ make +$ ./gitlab-pages -listen-http ":8090" -metrics-address ":9235" -pages-root path/to/gitlab/shared/pages -pages-domain example.com +``` + +### Structured logging + +You can use the `-log-format json` option to make GitLab Pages output +JSON-structured logs. This makes it easer to parse and search logs +with tools such as [ELK](https://www.elastic.co/elk-stack). + +### Cross-origin requests + +GitLab Pages defaults to allowing cross-origin requests for any resource it +serves. This can be disabled globally by passing `-disable-cross-origin-requests` +when starting the daemon. + +Having cross-origin requests enabled allows third-party websites to make use of +files stored on the Pages server, which allows various third-party integrations +to work. However, if it's running on a private network, this may allow websites +on the public Internet to access its contents *via* your user's browsers - +assuming they know the URL beforehand. + +### SSL/TLS versions + +GitLab Pages defaults to TLS 1.2 as the minimum supported TLS version. This can be +configured by using the `-tls-min-version` and `-tls-max-version` options. Accepted +values are `ssl3`, `tls1.0`, `tls1.1`, `tls1.2`, and `tls1.3` (if supported). When `tls1.3` +is used GitLab Pages will add `tls13=1` to `GODEBUG` to enable TLS 1.3. +See https://golang.org/src/crypto/tls/tls.go for more. + +### Custom headers + +To specify custom headers that should be sent with every request on GitLab pages, use the `-header` argument. + +You can add as many headers as you like. + +Example: +```sh +./gitlab-pages -header "Content-Security-Policy: default-src 'self' *.example.com" -header "X-Test: Testing" ... +``` + +### Configuration + +The daemon can be configured with any combination of these methods: +1. Command-line options +1. Environment variables +1. Configuration file +1. Compile-time defaults + +To see the available options and defaults, run: + +``` +./gitlab-pages -help +``` + +When using more than one method (e.g., configuration file and command-line +options), they follow the order of precedence given above. + +To convert a flag name into an environment variable name: +- Drop the leading - +- Convert all - characters into _ +- Uppercase the flag + +e.g., `-pages-domain=example.com` becomes `PAGES_DOMAIN=example.com` + +A configuration file is specified with the `-config` flag (or `CONFIG` +environment variable). Directives are specified in `key=value` format, like: + +``` +pages-domain=example.com +use-http2=false +``` + + + +### Testing and linting + +See [doc/development.md](doc/development.md) + + +### License + +MIT diff --git a/VERSION b/VERSION new file mode 100644 index 000000000..2b17ffd50 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.34.0 diff --git a/app.go b/app.go new file mode 100644 index 000000000..ec0c36a58 --- /dev/null +++ b/app.go @@ -0,0 +1,553 @@ +package main + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "os" + "sync" + "time" + + ghandlers "github.com/gorilla/handlers" + "github.com/rs/cors" + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/go-mimedb" + "gitlab.com/gitlab-org/labkit/errortracking" + labmetrics "gitlab.com/gitlab-org/labkit/metrics" + "gitlab.com/gitlab-org/labkit/monitoring" + + "gitlab.com/gitlab-org/gitlab-pages/internal/acme" + "gitlab.com/gitlab-org/gitlab-pages/internal/artifact" + "gitlab.com/gitlab-org/gitlab-pages/internal/auth" + cfg "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/handlers" + "gitlab.com/gitlab-org/gitlab-pages/internal/httperrors" + "gitlab.com/gitlab-org/gitlab-pages/internal/logging" + "gitlab.com/gitlab-org/gitlab-pages/internal/middleware" + "gitlab.com/gitlab-org/gitlab-pages/internal/netutil" + "gitlab.com/gitlab-org/gitlab-pages/internal/rejectmethods" + "gitlab.com/gitlab-org/gitlab-pages/internal/request" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/zip" + "gitlab.com/gitlab-org/gitlab-pages/internal/source" + "gitlab.com/gitlab-org/gitlab-pages/internal/tlsconfig" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +const ( + xForwardedHost = "X-Forwarded-Host" +) + +var ( + corsHandler = cors.New(cors.Options{AllowedMethods: []string{"GET"}}) +) + +type theApp struct { + appConfig + domains *source.Domains + Artifact *artifact.Artifact + Auth *auth.Auth + Handlers *handlers.Handlers + AcmeMiddleware *acme.Middleware + CustomHeaders http.Header +} + +func (a *theApp) isReady() bool { + return a.domains.IsReady() +} + +func (a *theApp) ServeTLS(ch *tls.ClientHelloInfo) (*tls.Certificate, error) { + if ch.ServerName == "" { + return nil, nil + } + + if domain, _ := a.domain(ch.ServerName); domain != nil { + tls, _ := domain.EnsureCertificate() + return tls, nil + } + + return nil, nil +} + +func (a *theApp) healthCheck(w http.ResponseWriter, r *http.Request, https bool) { + if a.isReady() { + w.Write([]byte("success\n")) + } else { + http.Error(w, "not yet ready", http.StatusServiceUnavailable) + } +} + +func (a *theApp) redirectToHTTPS(w http.ResponseWriter, r *http.Request, statusCode int) { + u := *r.URL + u.Scheme = request.SchemeHTTPS + u.Host = r.Host + u.User = nil + + http.Redirect(w, r, u.String(), statusCode) +} + +func (a *theApp) getHostAndDomain(r *http.Request) (string, *domain.Domain, error) { + host := request.GetHostWithoutPort(r) + domain, err := a.domain(host) + + return host, domain, err +} + +func (a *theApp) domain(host string) (*domain.Domain, error) { + return a.domains.GetDomain(host) +} + +// checkAuthAndServeNotFound performs the auth process if domain can't be found +// the main purpose of this process is to avoid leaking the project existence/not-existence +// by behaving the same if user has no access to the project or if project simply does not exists +func (a *theApp) checkAuthAndServeNotFound(domain *domain.Domain, w http.ResponseWriter, r *http.Request) bool { + // To avoid user knowing if pages exist, we will force user to login and authorize pages + if a.Auth.CheckAuthenticationWithoutProject(w, r, domain) { + return true + } + + // auth succeeded try to serve the correct 404 page + domain.ServeNotFoundAuthFailed(w, r) + return true +} + +func (a *theApp) tryAuxiliaryHandlers(w http.ResponseWriter, r *http.Request, https bool, host string, domain *domain.Domain) bool { + // Add auto redirect + if !https && a.RedirectHTTP { + a.redirectToHTTPS(w, r, http.StatusTemporaryRedirect) + return true + } + + if a.Handlers.HandleArtifactRequest(host, w, r) { + return true + } + + if !a.isReady() { + httperrors.Serve503(w) + return true + } + + if !domain.HasLookupPath(r) { + // redirect to auth and serve not found + if a.checkAuthAndServeNotFound(domain, w, r) { + return true + } + } + + if !https && domain.IsHTTPSOnly(r) { + a.redirectToHTTPS(w, r, http.StatusMovedPermanently) + return true + } + + return false +} + +// routingMiddleware will determine the host and domain for the request, for +// downstream middlewares to use +func (a *theApp) routingMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // if we could not retrieve a domain from domains source we break the + // middleware chain and simply respond with 502 after logging this + host, d, err := a.getHostAndDomain(r) + if err != nil && !errors.Is(err, domain.ErrDomainDoesNotExist) { + metrics.DomainsSourceFailures.Inc() + log.WithError(err).Error("could not fetch domain information from a source") + + httperrors.Serve502(w) + return + } + + r = request.WithHostAndDomain(r, host, d) + + handler.ServeHTTP(w, r) + }) +} + +// healthCheckMiddleware is serving the application status check +func (a *theApp) healthCheckMiddleware(handler http.Handler) (http.Handler, error) { + healthCheck := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + a.healthCheck(w, r, request.IsHTTPS(r)) + }) + + loggedHealthCheck, err := logging.BasicAccessLogger(healthCheck, a.LogFormat, nil) + if err != nil { + return nil, err + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == a.appConfig.StatusPath { + loggedHealthCheck.ServeHTTP(w, r) + return + } + + handler.ServeHTTP(w, r) + }), nil +} + +// customHeadersMiddleware will inject custom headers into the response +func (a *theApp) customHeadersMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + middleware.AddCustomHeaders(w, a.CustomHeaders) + + handler.ServeHTTP(w, r) + }) +} + +// acmeMiddleware will handle ACME challenges +func (a *theApp) acmeMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + domain := request.GetDomain(r) + + if a.AcmeMiddleware.ServeAcmeChallenges(w, r, domain) { + return + } + + handler.ServeHTTP(w, r) + }) +} + +// authMiddleware handles authentication requests +func (a *theApp) authMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if a.Auth.TryAuthenticate(w, r, a.domains) { + return + } + + handler.ServeHTTP(w, r) + }) +} + +// auxiliaryMiddleware will handle status updates, not-ready requests and other +// not static-content responses +func (a *theApp) auxiliaryMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + host := request.GetHost(r) + domain := request.GetDomain(r) + https := request.IsHTTPS(r) + + if a.tryAuxiliaryHandlers(w, r, https, host, domain) { + return + } + + handler.ServeHTTP(w, r) + }) +} + +// accessControlMiddleware will handle authorization +func (a *theApp) accessControlMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + domain := request.GetDomain(r) + + // Only for projects that have access control enabled + if domain.IsAccessControlEnabled(r) { + // accessControlMiddleware + if a.Auth.CheckAuthentication(w, r, domain) { + return + } + } + + handler.ServeHTTP(w, r) + }) +} + +// serveFileOrNotFoundHandler will serve static content or +// return a 404 Not Found response +func (a *theApp) serveFileOrNotFoundHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + defer metrics.ServingTime.Observe(time.Since(start).Seconds()) + + domain := request.GetDomain(r) + fileServed := domain.ServeFileHTTP(w, r) + + if !fileServed { + // We need to trigger authentication flow here if file does not exist to prevent exposing possibly private project existence, + // because the projects override the paths of the namespace project and they might be private even though + // namespace project is public + if domain.IsNamespaceProject(r) { + if a.Auth.CheckAuthenticationWithoutProject(w, r, domain) { + return + } + } + + // domain found and authentication succeeds + domain.ServeNotFoundHTTP(w, r) + } + }) +} + +// httpInitialMiddleware sets up HTTP requests +func (a *theApp) httpInitialMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler.ServeHTTP(w, setRequestScheme(r)) + }) +} + +// proxyInitialMiddleware sets up proxy requests +func (a *theApp) proxyInitialMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if forwardedHost := r.Header.Get(xForwardedHost); forwardedHost != "" { + r.Host = forwardedHost + } + + handler.ServeHTTP(w, r) + }) +} + +// setRequestScheme will update r.URL.Scheme if empty based on r.TLS +func setRequestScheme(r *http.Request) *http.Request { + if r.URL.Scheme == request.SchemeHTTPS || r.TLS != nil { + // make sure is set for non-proxy requests + r.URL.Scheme = request.SchemeHTTPS + } else { + r.URL.Scheme = request.SchemeHTTP + } + + return r +} + +func (a *theApp) buildHandlerPipeline() (http.Handler, error) { + // Handlers should be applied in a reverse order + handler := a.serveFileOrNotFoundHandler() + if !a.DisableCrossOriginRequests { + handler = corsHandler.Handler(handler) + } + handler = a.accessControlMiddleware(handler) + handler = a.auxiliaryMiddleware(handler) + handler = a.authMiddleware(handler) + handler = a.acmeMiddleware(handler) + handler, err := logging.AccessLogger(handler, a.LogFormat) + if err != nil { + return nil, err + } + + // Metrics + metricsMiddleware := labmetrics.NewHandlerFactory(labmetrics.WithNamespace("gitlab_pages")) + handler = metricsMiddleware(handler) + + handler = a.routingMiddleware(handler) + + // Health Check + handler, err = a.healthCheckMiddleware(handler) + if err != nil { + return nil, err + } + + // Custom response headers + handler = a.customHeadersMiddleware(handler) + + // This MUST be the last handler! + // This handler blocks unknown HTTP methods, + // being the last means it will be evaluated first + // preventing any operation on bogus requests. + handler = rejectmethods.NewMiddleware(handler) + + return handler, nil +} + +func (a *theApp) Run() { + var wg sync.WaitGroup + + limiter := netutil.NewLimiter(a.MaxConns) + + // Use a common pipeline to use a single instance of each handler, + // instead of making two nearly identical pipelines + commonHandlerPipeline, err := a.buildHandlerPipeline() + if err != nil { + log.WithError(err).Fatal("Unable to configure pipeline") + } + + proxyHandler := a.proxyInitialMiddleware(ghandlers.ProxyHeaders(commonHandlerPipeline)) + + httpHandler := a.httpInitialMiddleware(commonHandlerPipeline) + + // Listen for HTTP + for _, fd := range a.ListenHTTP { + a.listenHTTPFD(&wg, fd, httpHandler, limiter) + } + + // Listen for HTTPS + for _, fd := range a.ListenHTTPS { + a.listenHTTPSFD(&wg, fd, httpHandler, limiter) + } + + // Listen for HTTP proxy requests + for _, fd := range a.ListenProxy { + a.listenProxyFD(&wg, fd, proxyHandler, limiter) + } + + // Listen for HTTPS PROXYv2 requests + for _, fd := range a.ListenHTTPSProxyv2 { + a.ListenHTTPSProxyv2FD(&wg, fd, httpHandler, limiter) + } + + // Serve metrics for Prometheus + if a.ListenMetrics != 0 { + a.listenMetricsFD(&wg, a.ListenMetrics) + } + + a.domains.Read(a.Domain) + + wg.Wait() +} + +func (a *theApp) listenHTTPFD(wg *sync.WaitGroup, fd uintptr, httpHandler http.Handler, limiter *netutil.Limiter) { + wg.Add(1) + go func() { + defer wg.Done() + err := listenAndServe(fd, httpHandler, a.HTTP2, nil, limiter, false) + if err != nil { + capturingFatal(err, errortracking.WithField("listener", request.SchemeHTTP)) + } + }() +} + +func (a *theApp) listenHTTPSFD(wg *sync.WaitGroup, fd uintptr, httpHandler http.Handler, limiter *netutil.Limiter) { + wg.Add(1) + go func() { + defer wg.Done() + tlsConfig, err := a.TLSConfig() + if err != nil { + capturingFatal(err, errortracking.WithField("listener", request.SchemeHTTPS)) + } + + err = listenAndServe(fd, httpHandler, a.HTTP2, tlsConfig, limiter, false) + if err != nil { + capturingFatal(err, errortracking.WithField("listener", request.SchemeHTTPS)) + } + }() +} + +func (a *theApp) listenProxyFD(wg *sync.WaitGroup, fd uintptr, proxyHandler http.Handler, limiter *netutil.Limiter) { + wg.Add(1) + go func() { + wg.Add(1) + go func(fd uintptr) { + defer wg.Done() + err := listenAndServe(fd, proxyHandler, a.HTTP2, nil, limiter, false) + if err != nil { + capturingFatal(err, errortracking.WithField("listener", "http proxy")) + } + }(fd) + }() +} + +// https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt +func (a *theApp) ListenHTTPSProxyv2FD(wg *sync.WaitGroup, fd uintptr, httpHandler http.Handler, limiter *netutil.Limiter) { + wg.Add(1) + go func() { + defer wg.Done() + tlsConfig, err := a.TLSConfig() + if err != nil { + capturingFatal(err, errortracking.WithField("listener", request.SchemeHTTPS)) + } + + err = listenAndServe(fd, httpHandler, a.HTTP2, tlsConfig, limiter, true) + if err != nil { + capturingFatal(err, errortracking.WithField("listener", request.SchemeHTTPS)) + } + }() +} + +func (a *theApp) listenMetricsFD(wg *sync.WaitGroup, fd uintptr) { + wg.Add(1) + go func() { + defer wg.Done() + + l, err := net.FileListener(os.NewFile(fd, "[socket]")) + if err != nil { + capturingFatal(fmt.Errorf("failed to listen on FD %d: %v", fd, err), errortracking.WithField("listener", "metrics")) + } + + monitoringOpts := []monitoring.Option{ + monitoring.WithBuildInformation(VERSION, ""), + monitoring.WithListener(l), + } + + err = monitoring.Start(monitoringOpts...) + if err != nil { + capturingFatal(err, errortracking.WithField("listener", "metrics")) + } + }() +} + +func runApp(config appConfig) { + domains, err := source.NewDomains(config) + if err != nil { + log.WithError(err).Fatal("could not create domains config source") + } + + a := theApp{appConfig: config, domains: domains} + + err = logging.ConfigureLogging(a.LogFormat, a.LogVerbose) + if err != nil { + log.WithError(err).Fatal("Failed to initialize logging") + } + + if config.ArtifactsServer != "" { + a.Artifact = artifact.New(config.ArtifactsServer, config.ArtifactsServerTimeout, config.Domain) + } + + a.setAuth(config) + + a.Handlers = handlers.New(a.Auth, a.Artifact) + + if config.GitLabServer != "" { + a.AcmeMiddleware = &acme.Middleware{GitlabURL: config.GitLabServer} + } + + if len(config.CustomHeaders) != 0 { + customHeaders, err := middleware.ParseHeaderString(config.CustomHeaders) + if err != nil { + log.WithError(err).Fatal("Unable to parse header string") + } + a.CustomHeaders = customHeaders + } + + if err := mimedb.LoadTypes(); err != nil { + log.WithError(err).Warn("Loading extended MIME database failed") + } + + c := &cfg.Config{ + Zip: &cfg.ZipServing{ + ExpirationInterval: config.ZipCacheExpiry, + CleanupInterval: config.ZipCacheCleanup, + RefreshInterval: config.ZipCacheRefresh, + OpenTimeout: config.ZipeOpenTimeout, + }, + } + + // TODO: reconfigure all VFS' + // https://gitlab.com/gitlab-org/gitlab-pages/-/issues/512 + if err := zip.Instance().Reconfigure(c); err != nil { + fatal(err, "failed to reconfigure zip VFS") + } + + a.Run() +} + +func (a *theApp) setAuth(config appConfig) { + if config.ClientID == "" { + return + } + + var err error + a.Auth, err = auth.New(config.Domain, config.StoreSecret, config.ClientID, config.ClientSecret, + config.RedirectURI, config.GitLabServer) + if err != nil { + log.WithError(err).Fatal("could not initialize auth package") + } +} + +// fatal will log a fatal error and exit. +func fatal(err error, message string) { + log.WithError(err).Fatal(message) +} + +func (a *theApp) TLSConfig() (*tls.Config, error) { + return tlsconfig.Create(a.RootCertificate, a.RootKey, a.ServeTLS, + a.InsecureCiphers, a.TLSMinVersion, a.TLSMaxVersion) +} diff --git a/app_config.go b/app_config.go new file mode 100644 index 000000000..0dd192d5d --- /dev/null +++ b/app_config.go @@ -0,0 +1,71 @@ +package main + +import "time" + +type appConfig struct { + Domain string + ArtifactsServer string + ArtifactsServerTimeout int + RootCertificate []byte + RootKey []byte + MaxConns int + + ListenHTTP []uintptr + ListenHTTPS []uintptr + ListenProxy []uintptr + ListenHTTPSProxyv2 []uintptr + ListenMetrics uintptr + InsecureCiphers bool + TLSMinVersion uint16 + TLSMaxVersion uint16 + + HTTP2 bool + RedirectHTTP bool + StatusPath string + + DisableCrossOriginRequests bool + + LogFormat string + LogVerbose bool + + StoreSecret string + GitLabServer string + InternalGitLabServer string + GitLabAPISecretKey []byte + GitlabClientHTTPTimeout time.Duration + GitlabJWTTokenExpiration time.Duration + DomainConfigurationSource string + ClientID string + ClientSecret string + RedirectURI string + SentryDSN string + SentryEnvironment string + CustomHeaders []string + + ZipCacheExpiry time.Duration + ZipCacheRefresh time.Duration + ZipCacheCleanup time.Duration + ZipeOpenTimeout time.Duration +} + +// InternalGitLabServerURL returns URL to a GitLab instance. +func (config appConfig) InternalGitLabServerURL() string { + return config.InternalGitLabServer +} + +// GitlabClientSecret returns GitLab server access token. +func (config appConfig) GitlabAPISecret() []byte { + return config.GitLabAPISecretKey +} + +func (config appConfig) GitlabClientConnectionTimeout() time.Duration { + return config.GitlabClientHTTPTimeout +} + +func (config appConfig) GitlabJWTTokenExpiry() time.Duration { + return config.GitlabJWTTokenExpiration +} + +func (config appConfig) DomainConfigSource() string { + return config.DomainConfigurationSource +} diff --git a/app_test.go b/app_test.go new file mode 100644 index 000000000..f35e90c54 --- /dev/null +++ b/app_test.go @@ -0,0 +1,115 @@ +package main + +import ( + "crypto/tls" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/request" + "gitlab.com/gitlab-org/gitlab-pages/internal/source" +) + +func Test_setRequestScheme(t *testing.T) { + tests := []struct { + name string + r *http.Request + expectedScheme string + }{ + { + name: "http", + r: newGetRequestWithScheme(t, request.SchemeHTTP, false), + expectedScheme: request.SchemeHTTP, + }, + { + name: "https", + r: newGetRequestWithScheme(t, request.SchemeHTTPS, true), + expectedScheme: request.SchemeHTTPS, + }, + { + name: "empty_scheme_no_tls", + r: newGetRequestWithScheme(t, "", false), + expectedScheme: request.SchemeHTTP, + }, + { + name: "empty_scheme_with_tls", + r: newGetRequestWithScheme(t, "", true), + expectedScheme: request.SchemeHTTPS, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := setRequestScheme(tt.r) + require.Equal(t, got.URL.Scheme, tt.expectedScheme) + }) + } +} + +func newGetRequestWithScheme(t *testing.T, scheme string, withTLS bool) *http.Request { + t.Helper() + + req, err := http.NewRequest("GET", fmt.Sprintf("%s//localost/", scheme), nil) + require.NoError(t, err) + req.URL.Scheme = scheme + if withTLS { + req.TLS = &tls.ConnectionState{} + } + + return req +} + +func TestHealthCheckMiddleware(t *testing.T) { + tests := []struct { + name string + path string + status int + body string + }{ + { + name: "Not a healthcheck request", + path: "/foo/bar", + status: http.StatusOK, + body: "Hello from inner handler", + }, + { + name: "Healthcheck request", + path: "/-/healthcheck", + status: http.StatusServiceUnavailable, + body: "not yet ready\n", + }, + } + + app := theApp{ + appConfig: appConfig{ + StatusPath: "/-/healthcheck", + DomainConfigurationSource: "auto", + }, + } + + domains, err := source.NewDomains(app.appConfig) + require.NoError(t, err) + app.domains = domains + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + io.WriteString(w, "Hello from inner handler") + }) + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + r := httptest.NewRequest("GET", tc.path, nil) + rr := httptest.NewRecorder() + + middleware, err := app.healthCheckMiddleware(handler) + require.NoError(t, err) + middleware.ServeHTTP(rr, r) + + require.Equal(t, tc.status, rr.Code) + require.Equal(t, tc.body, rr.Body.String()) + }) + } +} diff --git a/config_test.go b/config_test.go new file mode 100644 index 000000000..dc200d1c0 --- /dev/null +++ b/config_test.go @@ -0,0 +1,54 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGitLabServerFromFlags(t *testing.T) { + tests := []struct { + name string + gitLabServer string + gitLabAuthServer string + artifactsServer string + expected string + }{ + { + name: "When gitLabServer is set", + gitLabServer: "https://gitlabserver.com", + gitLabAuthServer: "https://authserver.com", + artifactsServer: "https://artifactsserver.com", + expected: "https://gitlabserver.com", + }, + { + name: "When auth server is set", + gitLabServer: "", + gitLabAuthServer: "https://authserver.com", + artifactsServer: "https://artifactsserver.com", + expected: "https://authserver.com", + }, + { + name: "When only artifacts server is set", + gitLabServer: "", + gitLabAuthServer: "", + artifactsServer: "https://artifactsserver.com", + expected: "https://artifactsserver.com", + }, + { + name: "When only artifacts server includes path", + gitLabServer: "", + gitLabAuthServer: "", + artifactsServer: "https://artifactsserver.com:8080/api/path", + expected: "https://artifactsserver.com:8080", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gitLabServer = &test.gitLabServer + gitLabAuthServer = &test.gitLabAuthServer + artifactsServer = &test.artifactsServer + require.Equal(t, test.expected, gitlabServerFromFlags()) + }) + } +} diff --git a/daemon.go b/daemon.go new file mode 100644 index 000000000..bf0472bbd --- /dev/null +++ b/daemon.go @@ -0,0 +1,358 @@ +package main + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/signal" + "strings" + "syscall" + + "github.com/kardianos/osext" + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/internal/jail" +) + +const ( + daemonRunProgram = "gitlab-pages-unprivileged" + + pagesRootInChroot = "/pages" +) + +func daemonMain() { + if os.Args[0] != daemonRunProgram { + return + } + + // Validate that a working directory is valid + // https://man7.org/linux/man-pages/man2/getcwd.2.html + wd, err := os.Getwd() + if err != nil { + fatal(err, "could not get current working directory") + } else if strings.HasPrefix(wd, "(unreachable)") { + fatal(os.ErrPermission, "could not get current working directory") + } + + log.WithFields(log.Fields{ + "uid": syscall.Getuid(), + "gid": syscall.Getgid(), + "wd": wd, + }).Info("starting the daemon as unprivileged user") + + // read the configuration from the pipe "ExtraFiles" + var config appConfig + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&config); err != nil { + fatal(err, "could not decode app config") + } + runApp(config) + os.Exit(0) +} + +func daemonReexec(uid, gid uint, args ...string) (cmd *exec.Cmd, err error) { + path, err := osext.Executable() + if err != nil { + return + } + + cmd = &exec.Cmd{ + Path: path, + Args: args, + Env: os.Environ(), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + SysProcAttr: &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: uint32(uid), + Gid: uint32(gid), + }, + Setsid: true, + }, + } + return +} + +func daemonUpdateFd(cmd *exec.Cmd, fd uintptr) (childFd uintptr) { + file := os.NewFile(fd, "[socket]") + + // we add 3 since, we have a 3 predefined FDs + childFd = uintptr(3 + len(cmd.ExtraFiles)) + cmd.ExtraFiles = append(cmd.ExtraFiles, file) + + return +} + +func daemonUpdateFds(cmd *exec.Cmd, fds []uintptr) { + for idx, fd := range fds { + fds[idx] = daemonUpdateFd(cmd, fd) + } +} + +func killProcess(cmd *exec.Cmd) { + if cmd.Process != nil { + cmd.Process.Kill() + } + cmd.Wait() + for _, file := range cmd.ExtraFiles { + file.Close() + } +} + +func passSignals(cmd *exec.Cmd) { + if cmd.Process == nil { + return + } + + s := make(chan os.Signal) + signal.Notify(s, syscall.SIGTERM, os.Interrupt, os.Kill) + + go func() { + for cmd.Process != nil { + cmd.Process.Signal(<-s) + } + }() +} + +func chrootDaemon(cmd *exec.Cmd) (*jail.Jail, error) { + wd, err := os.Getwd() + if err != nil { + return nil, err + } + + chroot := jail.Into(wd) + + // Generate a probabilistically-unique suffix for the copy of the pages + // binary being placed into the chroot + suffix := make([]byte, 16) + if _, err := rand.Read(suffix); err != nil { + return nil, err + } + + tempExecutablePath := fmt.Sprintf("/.daemon.%x", suffix) + + if err := chroot.CopyTo(tempExecutablePath, cmd.Path); err != nil { + return nil, err + } + + // Update command to use chroot + cmd.SysProcAttr.Chroot = chroot.Path() + cmd.Path = tempExecutablePath + cmd.Dir = "/" + + return chroot, nil +} + +func jailCopyCertDir(cage *jail.Jail, sslCertDir, jailCertsDir string) error { + log.WithFields(log.Fields{ + "ssl-cert-dir": sslCertDir, + }).Debug("Copying certs from SSL_CERT_DIR") + + entries, err := ioutil.ReadDir(sslCertDir) + if err != nil { + return fmt.Errorf("failed to read SSL_CERT_DIR: %+v", err) + } + + for _, fi := range entries { + // Copy only regular files and symlinks + mode := fi.Mode() + if !(mode.IsRegular() || mode&os.ModeSymlink != 0) { + continue + } + + err = cage.CopyTo(jailCertsDir+"/"+fi.Name(), sslCertDir+"/"+fi.Name()) + if err != nil { + log.WithError(err).Errorf("failed to copy cert: %q", fi.Name()) + // Go on and try to copy other files. We don't want the whole + // startup process to fail due to a single failure here. + } + } + + return nil +} + +func jailDaemonCerts(cmd *exec.Cmd, cage *jail.Jail) error { + sslCertFile := os.Getenv("SSL_CERT_FILE") + sslCertDir := os.Getenv("SSL_CERT_DIR") + if sslCertFile == "" && sslCertDir == "" { + log.Warn("Neither SSL_CERT_FILE nor SSL_CERT_DIR environment variable is set. HTTPS requests will fail.") + return nil + } + + // This assumes cage.MkDir("/etc") has already been called + cage.MkDir("/etc/ssl", 0755) + + // Copy SSL_CERT_FILE inside the jail + if sslCertFile != "" { + jailCertsFile := "/etc/ssl/ca-bundle.pem" + err := cage.CopyTo(jailCertsFile, sslCertFile) + if err != nil { + return fmt.Errorf("failed to copy SSL_CERT_FILE: %+v", err) + } + cmd.Env = append(cmd.Env, "SSL_CERT_FILE="+jailCertsFile) + } + + // Copy all files and symlinks from SSL_CERT_DIR into the jail + if sslCertDir != "" { + jailCertsDir := "/etc/ssl/certs" + cage.MkDir(jailCertsDir, 0755) + err := jailCopyCertDir(cage, sslCertDir, jailCertsDir) + if err != nil { + return err + } + cmd.Env = append(cmd.Env, "SSL_CERT_DIR="+jailCertsDir) + } + + return nil +} + +func jailCreate(cmd *exec.Cmd) (*jail.Jail, error) { + cage := jail.CreateTimestamped("gitlab-pages", 0755) + + // Add /dev/urandom and /dev/random inside the jail. This is required to + // support Linux versions < 3.17, which do not have the getrandom() syscall + cage.MkDir("/dev", 0755) + if err := cage.CharDev("/dev/urandom"); err != nil { + return nil, err + } + + if err := cage.CharDev("/dev/random"); err != nil { + return nil, err + } + + // Add gitlab-pages inside the jail + err := cage.CopyTo("/gitlab-pages", cmd.Path) + if err != nil { + return nil, err + } + + // Add /etc/resolv.conf and /etc/hosts inside the jail + cage.MkDir("/etc", 0755) + err = cage.Copy("/etc/resolv.conf") + if err != nil { + return nil, err + } + err = cage.Copy("/etc/hosts") + if err != nil { + return nil, err + } + + // Add certificates inside the jail + err = jailDaemonCerts(cmd, cage) + if err != nil { + return nil, err + } + + return cage, nil +} + +func jailDaemon(cmd *exec.Cmd) (*jail.Jail, error) { + cage, err := jailCreate(cmd) + if err != nil { + return nil, err + } + + wd, err := os.Getwd() + if err != nil { + return nil, err + } + + // Bind mount shared folder + cage.MkDir(pagesRootInChroot, 0755) + cage.Bind(pagesRootInChroot, wd) + + // Update command to use chroot + cmd.SysProcAttr.Chroot = cage.Path() + cmd.Path = "/gitlab-pages" + cmd.Dir = pagesRootInChroot + + return cage, nil +} + +func daemonize(config appConfig, uid, gid uint, inPlace bool) error { + log.WithFields(log.Fields{ + "uid": uid, + "gid": gid, + "in-place": inPlace, + }).Info("running the daemon as unprivileged user") + + cmd, err := daemonReexec(uid, gid, daemonRunProgram) + if err != nil { + return err + } + defer killProcess(cmd) + + // Run daemon in chroot environment + var wrapper *jail.Jail + if inPlace { + wrapper, err = chrootDaemon(cmd) + } else { + wrapper, err = jailDaemon(cmd) + } + if err != nil { + log.WithError(err).Print("chroot failed") + return err + } + defer wrapper.Dispose() + + // Unshare mount namespace + // 1. If this fails, in a worst case changes to mounts will propagate to other processes + // 2. Ensures that jail mount is not propagated to the parent mount namespace + // to avoid populating `tmp` directory with old mounts + _ = wrapper.Unshare() + + if err := wrapper.Build(); err != nil { + log.WithError(err).Print("chroot build failed") + return err + } + + // Create a pipe to pass the configuration + configReader, configWriter, err := os.Pipe() + if err != nil { + return err + } + defer configWriter.Close() + cmd.ExtraFiles = append(cmd.ExtraFiles, configReader) + + updateFds(&config, cmd) + + // Start the process + if err := cmd.Start(); err != nil { + log.WithError(err).Error("start failed") + return err + } + + // Write the configuration + if err := json.NewEncoder(configWriter).Encode(config); err != nil { + return err + } + configWriter.Close() + + // Pass through signals + passSignals(cmd) + + // Wait for process to exit + return cmd.Wait() +} + +func updateFds(config *appConfig, cmd *exec.Cmd) { + for _, fds := range [][]uintptr{ + config.ListenHTTP, + config.ListenHTTPS, + config.ListenProxy, + config.ListenHTTPSProxyv2, + } { + daemonUpdateFds(cmd, fds) + } + + for _, fdPtr := range []*uintptr{ + &config.ListenMetrics, + } { + if *fdPtr != 0 { + *fdPtr = daemonUpdateFd(cmd, *fdPtr) + } + } +} diff --git a/doc/dependency_decisions.yml b/doc/dependency_decisions.yml new file mode 100644 index 000000000..4137ba12f --- /dev/null +++ b/doc/dependency_decisions.yml @@ -0,0 +1,133 @@ +--- +- - :license + - github.com/beorn7/perks/quantile + - MIT + - :who: + :why: + :versions: [] + :when: 2019-04-03 13:58:37.693164000 Z +- - :license + - github.com/matttproud/golang_protobuf_extensions/pbutil + - Apache 2.0 + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:08:10.359320000 Z +- - :license + - github.com/karrick/godirwalk + - BSD-2-Clause + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:09:35.519709000 Z +- - :license + - github.com/pkg/errors + - BSD-2-Clause + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:10:49.901903000 Z +- - :license + - github.com/prometheus/client_golang/prometheus + - Apache-2.0 + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:11:57.417366000 Z +- - :license + - github.com/prometheus/client_model/go + - Apache-2.0 + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:14:16.841551000 Z +- - :license + - gitlab.com/gitlab-org/gitaly/auth + - MIT + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:15:26.240245000 Z +- - :license + - gitlab.com/gitlab-org/gitlab-pages-proto/go + - MIT + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:16:18.620931000 Z +- - :license + - google.golang.org/genproto/googleapis/rpc/status + - Apache-2.0 + - :who: + :why: + :versions: [] + :when: 2019-04-03 15:19:19.394529000 Z +- - :license + - golang.org/x/crypto/ssh/terminal + - BSD-3-clause + - :who: + :why: + :versions: [] + :when: 2019-04-05 10:26:09.636346000 Z +- - :ignore + - github.com/certifi/gocertifi + - :who: + :why: + :versions: [] + :when: 2019-07-10 17:01:35.894437233 Z +- - :license + - github.com/go-logfmt/logfmt + - MIT + - :who: + :why: + :versions: [] + :when: 2019-09-10 14:57:15.905705202 Z +- - :license + - github.com/pmezard/go-difflib + - BSD-3-Clause + - :who: + :why: + :versions: [] + :when: 2019-09-10 14:59:14.300178506 Z +- - :license + - github.com/gogo/protobuf + - BSD-3-clause + - :who: Ben Kochie + :why: https://github.com/gogo/protobuf/blob/master/LICENSE + :versions: [] + :when: 2019-09-11 12:57:17.184823077 Z +- - :license + - github.com/modern-go/concurrent + - Apache-2.0 + - :who: Ben Kochie + :why: https://github.com/modern-go/concurrent/blob/master/LICENSE + :versions: [] + :when: 2019-09-11 12:58:04.927007992 Z +- - :license + - github.com/modern-go/reflect2 + - Apache-2.0 + - :who: Ben Kochie + :why: https://github.com/modern-go/reflect2/blob/master/LICENSE + :versions: [] + :when: 2019-09-11 12:58:33.840590099 Z +- - :license + - github.com/wadey/gocovmerge + - BSD-2 + - :who: + :why: + :versions: [] + :when: 2019-09-17 14:53:25.294515000 Z +- - :license + - gopkg.in/check.v1 + - BSD-2-Clause + - :who: Krasimir Angelov + :why: https://github.com/go-check/check/blob/e54ca221ea41951970e0249fb5163642c915dbb2/LICENSE + :versions: [] + :when: 2019-10-08 02:12:00.000000000 Z +- - :license + - github.com/kr/pretty + - MIT + - :who: Krasimir Angelov + :why: https://github.com/kr/pretty/blob/088c856450c08c03eb32f7a6c221e6eefaa10e6f/License + :versions: [] + :when: 2019-10-08 02:12:00.000000000 Z diff --git a/doc/development.md b/doc/development.md new file mode 100644 index 000000000..e329b105c --- /dev/null +++ b/doc/development.md @@ -0,0 +1,81 @@ +# Getting started with development + +If you want to develop GitLab Pages with the GDK, follow [these instructions](https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/doc/howto/pages.md). + +You can also run and develop GitLab Pages outside of the GDK. Here is a few commands and host file changes to get you running with the examples built into the repo. + +Create `gitlab-pages.conf` in the root of this project: + +``` +# Replace `192.168.1.135` with your own local IP +pages-domain=192.168.1.135.nip.io +pages-root=shared/pages +listen-http=:8090 +# WARNING: to be deprecated in https://gitlab.com/gitlab-org/gitlab-pages/-/issues/382 +domain-config-source=disk +log-verbose=true +``` + +Build and start the app. For any changes, you must run `make` to build the app, so it's best to just always run it before you start the app. It's quick to build so don't worry! + +```sh +make && ./gitlab-pages -config=gitlab-pages.conf +``` + +Visit http://group.192.168.1.135.nip.io:8090/project/index.html (replace `192.168.1.135` with your IP) and you should see a +`project-subdir` response + +You can see our [testing](#testing) and [linting](#linting) sections below on how to run those. + +### I don't want to use `nip.io` + +If you don't want to use `nip.io` for the wildcard DNS, you can use one of these methods. + +A simple alternative is to add a `/etc/hosts` entry pointing from `localhost`/`127.0.0.1` to the directory subdomain for any directory under `shared/pages/`. +This is because `/etc/hosts` does not support wildcard hostnames. + +``` +127.0.0.1 pages.gdk.test +# You will need to an entry for every domain/group you want to access +127.0.0.1 group.pages.gdk.test +``` + +An alternative is to use [`dnsmasq`](https://wiki.debian.org/dnsmasq) to handle wildcard hostnames. + + +## Linting + +```sh +# Get everything installed and setup (you only need to run this once) +# If you run into problems running the linting process, +# you may have to run `sudo rm -rf .GOPATH` and try this step again +make setup + +# Run the linter locally +make lint +``` + +## Testing + +To run tests, you can use these commands: + +```sh +# This will run all of the tests in the codebase +make test + +# Run a specfic test file +go test ./internal/serving/disk/ + +# Run a specific test in a file +go test ./internal/serving/disk/ -run TestDisk_ServeFileHTTP + +# Run all unit tests except acceptance_test.go +go test ./... -short + +# Run acceptance_test.go only +make acceptance +# Run specific acceptance tests +# We add `make` here because acceptance tests use the last binary that was compiled, +# so we want to have the latest changes in the build that is tested +make && go test ./ -run TestRedirect +``` diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..f06ea125b --- /dev/null +++ b/go.mod @@ -0,0 +1,42 @@ +module gitlab.com/gitlab-org/gitlab-pages + +go 1.13 + +require ( + github.com/andybalholm/brotli v1.0.1 + github.com/cenkalti/backoff/v4 v4.0.2 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/fzipp/gocyclo v0.0.0-20150627053110-6acd4345c835 + github.com/golang/mock v1.3.1 + github.com/gorilla/context v1.1.1 + github.com/gorilla/handlers v1.4.2 + github.com/gorilla/securecookie v1.1.1 + github.com/gorilla/sessions v1.2.0 + github.com/jstemmer/go-junit-report v0.9.1 + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 + github.com/karlseguin/ccache/v2 v2.0.6 + github.com/karrick/godirwalk v1.10.12 + github.com/kr/text v0.2.0 // indirect + github.com/namsral/flag v1.7.4-pre + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/pires/go-proxyproto v0.2.0 + github.com/prometheus/client_golang v1.6.0 + github.com/rs/cors v1.7.0 + github.com/sirupsen/logrus v1.7.0 + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.6.1 + github.com/tj/assert v0.0.3 // indirect + github.com/tj/go-redirects v0.0.0-20180508180010-5c02ead0bbc5 + github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce + github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad + gitlab.com/gitlab-org/go-mimedb v1.45.0 + gitlab.com/gitlab-org/labkit v1.3.0 + golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 + golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b + golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f + golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770 + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + honnef.co/go/tools v0.0.1-2020.1.3 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..672bda11e --- /dev/null +++ b/go.sum @@ -0,0 +1,553 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0 h1:0E3eE8MX426vUOs7aHfI7aN1BrIzzzf4ccKCSfSjGmc= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= +github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andybalholm/brotli v1.0.1 h1:KqhlKozYbRtJvsPrrEeXcO+N2l6NYT5A2QAFmSULpEc= +github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/client9/reopen v1.0.0 h1:8tpLVR74DLpLObrn2KvsyxJY++2iORGR17WLUdSzUws= +github.com/client9/reopen v1.0.0/go.mod h1:caXVCEr+lUtoN1FlsRiOWdfQtdRHIYfcb0ai8qKWtkQ= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fzipp/gocyclo v0.0.0-20150627053110-6acd4345c835 h1:roDmqJ4Qes7hrDOsWsMCce0vQHz3xiMPjJ9m4c2eeNs= +github.com/fzipp/gocyclo v0.0.0-20150627053110-6acd4345c835/go.mod h1:BjL/N0+C+j9uNX+1xcNuM9vdSIcXCZrQZUYbXOFbgN8= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getsentry/sentry-go v0.7.0 h1:MR2yfR4vFfv/2+iBuSnkdQwVg7N9cJzihZ6KJu7srwQ= +github.com/getsentry/sentry-go v0.7.0/go.mod h1:pLFpD2Y5RHIKF9Bw3KH6/68DeN2K/XBJd8awjdPnUwg= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc h1:DLpL8pWq0v4JYoRpEhDfsJhhJyGKCcQM2WPW2TJs31c= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ= +github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/karlseguin/ccache/v2 v2.0.6 h1:jFCLz4bF4EPfuCcvESAgYNClkEb31LV3WzyOwLlFz7w= +github.com/karlseguin/ccache/v2 v2.0.6/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= +github.com/karrick/godirwalk v1.10.12 h1:BqUm+LuJcXjGv1d2mj3gBiQyrQ57a0rYoAmhvJQ7RDU= +github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= +github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= +github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= +github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/lightstep/lightstep-tracer-go v0.15.6/go.mod h1:6AMpwZpsyCFwSovxzM78e+AsYxE8sGwiM6C3TytaWeI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= +github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/namsral/flag v1.7.4-pre h1:b2ScHhoCUkbsq0d2C15Mv+VU8bl8hAXV8arnWiOHNZs= +github.com/namsral/flag v1.7.4-pre/go.mod h1:OXldTctbM6SWH1K899kPZcf65KxJiD7MsceFUpB5yDo= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid/v2 v2.0.2 h1:r4fFzBm+bv0wNKNh5eXTwU7i85y5x+uwkxCUTNVQqLc= +github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pires/go-proxyproto v0.2.0 h1:WyYKlv9pkt77b+LjMvPfwrsAxviaGCFhG4KDIy1ofLY= +github.com/pires/go-proxyproto v0.2.0/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sebest/xff v0.0.0-20160910043805-6c115e0ffa35 h1:eajwn6K3weW5cd1ZXLu2sJ4pvwlBiCWY4uDejOr73gM= +github.com/sebest/xff v0.0.0-20160910043805-6c115e0ffa35/go.mod h1:wozgYq9WEBQBaIJe4YZ0qTSFAMxmcwBhQH0fO0R34Z0= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-redirects v0.0.0-20180508180010-5c02ead0bbc5 h1:1gWKekoYJSrFfE3r+Q4kfV+DkOWpwH+DHTFZvbbaelQ= +github.com/tj/go-redirects v0.0.0-20180508180010-5c02ead0bbc5/go.mod h1:E0E2H2gQA+uoi27VCSU+a/BULPtadQA78q3cpTjZbZw= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad h1:W0LEBv82YCGEtcmPA3uNZBI33/qF//HAAs3MawDjRa0= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +gitlab.com/gitlab-org/go-mimedb v1.45.0 h1:PO8dx6HEWzPYU6MQTYnCbpQEJzhJLW/Bh43+2VUHTgc= +gitlab.com/gitlab-org/go-mimedb v1.45.0/go.mod h1:wa9y/zOSFKmTXLyBs4clz2FNVhZQmmEQM9TxslPAjZ0= +gitlab.com/gitlab-org/labkit v1.3.0 h1:PDP4id5YEvw6juWrGE88LcTtEridtRAOyvNvUOtcc9o= +gitlab.com/gitlab-org/labkit v1.3.0/go.mod h1:nohrYTSLDnZix0ebXZrbZJjymRar8HeV2roWL5/jw2U= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770 h1:M9Fif0OxNji8w+HvmhVQ8KJtiZOsjU9RgslJGhn95XE= +golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +gopkg.in/DataDog/dd-trace-go.v1 v1.7.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/helpers.go b/helpers.go new file mode 100644 index 000000000..d71f2b2c4 --- /dev/null +++ b/helpers.go @@ -0,0 +1,46 @@ +package main + +import ( + "io/ioutil" + "net" + "os" + + "gitlab.com/gitlab-org/labkit/errortracking" +) + +func readFile(file string) (result []byte) { + result, err := ioutil.ReadFile(file) + if err != nil { + fatal(err, "could not read file") + } + return +} + +// Be careful: if you let either of the return values get garbage +// collected by Go they will be closed automatically. +func createSocket(addr string) (net.Listener, *os.File) { + l, err := net.Listen("tcp", addr) + if err != nil { + fatal(err, "could not create socket") + } + + return l, fileForListener(l) +} + +func fileForListener(l net.Listener) *os.File { + type filer interface { + File() (*os.File, error) + } + + f, err := l.(filer).File() + if err != nil { + fatal(err, "could not find file for listener") + } + + return f +} + +func capturingFatal(err error, fields ...errortracking.CaptureOption) { + errortracking.Capture(err, fields...) + fatal(err, "capturing fatal") +} diff --git a/internal/acme/acme.go b/internal/acme/acme.go new file mode 100644 index 000000000..3bfa8f2e0 --- /dev/null +++ b/internal/acme/acme.go @@ -0,0 +1,62 @@ +package acme + +import ( + "net/http" + "net/url" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/internal/host" +) + +// Middleware handles acme challenges by redirecting them to GitLab instance +type Middleware struct { + GitlabURL string +} + +// Domain interface represent D from domain package +type Domain interface { + ServeFileHTTP(w http.ResponseWriter, r *http.Request) bool +} + +// ServeAcmeChallenges identifies if request is acme-challenge and redirects to GitLab in that case +func (m *Middleware) ServeAcmeChallenges(w http.ResponseWriter, r *http.Request, domain Domain) bool { + if m == nil { + return false + } + + if !isAcmeChallenge(r.URL.Path) { + return false + } + + if domain.ServeFileHTTP(w, r) { + return false + } + + return m.redirectToGitlab(w, r) +} + +func isAcmeChallenge(path string) bool { + return strings.HasPrefix(filepath.Clean(path), "/.well-known/acme-challenge/") +} + +func (m *Middleware) redirectToGitlab(w http.ResponseWriter, r *http.Request) bool { + redirectURL, err := url.Parse(m.GitlabURL) + if err != nil { + log.WithError(err).Error("Can't parse GitLab URL for acme challenge redirect") + return false + } + + redirectURL.Path = "/-/acme-challenge" + query := redirectURL.Query() + query.Set("domain", host.FromRequest(r)) + query.Set("token", filepath.Base(r.URL.Path)) + redirectURL.RawQuery = query.Encode() + + log.WithField("redirect_url", redirectURL).Debug("Redirecting to GitLab for processing acme challenge") + + http.Redirect(w, r, redirectURL.String(), http.StatusTemporaryRedirect) + return true +} diff --git a/internal/acme/acme_test.go b/internal/acme/acme_test.go new file mode 100644 index 000000000..ab191694f --- /dev/null +++ b/internal/acme/acme_test.go @@ -0,0 +1,58 @@ +package acme + +import ( + "net/http" + "testing" + + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +type domainStub struct { + hasAcmeChallenge bool +} + +func (d *domainStub) ServeFileHTTP(w http.ResponseWriter, r *http.Request) bool { + if r.URL.Path == "/.well-known/acme-challenge/token" { + return d.hasAcmeChallenge + } + + return false +} + +func serveAcmeOrNotFound(m *Middleware, domain Domain) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !m.ServeAcmeChallenges(w, r, domain) { + http.NotFound(w, r) + } + } +} + +const ( + baseURL = "http://example.com" + indexURL = baseURL + "/index.html" + challengeURL = baseURL + "/.well-known/acme-challenge/token" +) + +var ( + domainWithChallenge = &domainStub{hasAcmeChallenge: true} + domain = &domainStub{hasAcmeChallenge: false} + middleware = &Middleware{GitlabURL: "https://gitlab.example.com"} +) + +func TestServeAcmeChallengesNotConfigured(t *testing.T) { + testhelpers.AssertHTTP404(t, serveAcmeOrNotFound(nil, domain), "GET", challengeURL, nil, nil) +} + +func TestServeAcmeChallengeWhenPresent(t *testing.T) { + testhelpers.AssertHTTP404(t, serveAcmeOrNotFound(middleware, domainWithChallenge), "GET", challengeURL, nil, nil) +} + +func TestServeAcmeChallengeWhenMissing(t *testing.T) { + testhelpers.AssertRedirectTo( + t, serveAcmeOrNotFound(middleware, domain), + "GET", challengeURL, nil, + "https://gitlab.example.com/-/acme-challenge?domain=example.com&token=token", + ) + + testhelpers.AssertHTTP404(t, serveAcmeOrNotFound(middleware, domain), "GET", indexURL, nil, nil) +} diff --git a/internal/artifact/artifact.go b/internal/artifact/artifact.go new file mode 100644 index 000000000..922aeeee3 --- /dev/null +++ b/internal/artifact/artifact.go @@ -0,0 +1,185 @@ +package artifact + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "regexp" + "strconv" + "strings" + "time" + + "gitlab.com/gitlab-org/labkit/errortracking" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httperrors" + "gitlab.com/gitlab-org/gitlab-pages/internal/httptransport" + "gitlab.com/gitlab-org/gitlab-pages/internal/logging" +) + +const ( + // Format a non-/-suffixed URL, an escaped project full_path, a job ID and + // a /-prefixed file path into an URL string + apiURLTemplate = "%s/projects/%s/jobs/%s/artifacts%s" + + minStatusCode = 200 + maxStatusCode = 299 + + createArtifactRequestErrMsg = "failed to create the artifact request" + artifactRequestErrMsg = "failed to request the artifact" +) + +var ( + // Captures subgroup + project, job ID and artifacts path + pathExtractor = regexp.MustCompile(`(?i)\A/-/(.*)/-/jobs/(\d+)/artifacts(/[^?]*)\z`) + errArtifactResponse = errors.New("artifact request response was not successful") +) + +// Artifact proxies requests for artifact files to the GitLab artifacts API +type Artifact struct { + server string + suffix string + client *http.Client +} + +// New when provided the arguments defined herein, returns a pointer to an +// Artifact that is used to proxy requests. +func New(server string, timeoutSeconds int, pagesDomain string) *Artifact { + return &Artifact{ + server: strings.TrimRight(server, "/"), + suffix: "." + strings.ToLower(pagesDomain), + client: &http.Client{ + Timeout: time.Second * time.Duration(timeoutSeconds), + Transport: httptransport.InternalTransport, + }, + } +} + +// TryMakeRequest will attempt to proxy a request and write it to the argument +// http.ResponseWriter, ultimately returning a bool that indicates if the +// http.ResponseWriter has been written to in any capacity. Additional handler func +// may be given which should return true if it did handle the response. +func (a *Artifact) TryMakeRequest(host string, w http.ResponseWriter, r *http.Request, token string, additionalHandler func(*http.Response) bool) bool { + if a == nil || a.server == "" || host == "" { + return false + } + + reqURL, ok := a.BuildURL(host, r.URL.Path) + if !ok { + return false + } + + a.makeRequest(w, r, reqURL, token, additionalHandler) + + return true +} + +func (a *Artifact) makeRequest(w http.ResponseWriter, r *http.Request, reqURL *url.URL, token string, additionalHandler func(*http.Response) bool) { + req, err := http.NewRequest("GET", reqURL.String(), nil) + if err != nil { + logging.LogRequest(r).WithError(err).Error(createArtifactRequestErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + httperrors.Serve500(w) + return + } + + if token != "" { + req.Header.Add("Authorization", "Bearer "+token) + } + resp, err := a.client.Do(req) + + if err != nil { + logging.LogRequest(r).WithError(err).Error(artifactRequestErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + httperrors.Serve502(w) + return + } + + if additionalHandler(resp) { + return + } + + if resp.StatusCode == http.StatusNotFound { + httperrors.Serve404(w) + return + } + + if resp.StatusCode == http.StatusInternalServerError { + logging.LogRequest(r).Error(errArtifactResponse) + errortracking.Capture(errArtifactResponse, errortracking.WithRequest(r)) + httperrors.Serve500(w) + return + } + + // we only cache responses within the 2xx series response codes and that were not private + if token == "" { + addCacheHeader(w, resp) + } + + w.Header().Set("Content-Type", resp.Header.Get("Content-Type")) + w.Header().Set("Content-Length", strconv.FormatInt(resp.ContentLength, 10)) + w.WriteHeader(resp.StatusCode) + io.Copy(w, resp.Body) +} + +func addCacheHeader(w http.ResponseWriter, resp *http.Response) { + if (resp.StatusCode >= minStatusCode) && (resp.StatusCode <= maxStatusCode) { + w.Header().Set("Cache-Control", "max-age=3600") + } +} + +// encodePathSegments separately encodes each segment of the path, as +// segments can have special characters in them, if the path is not valid +// and gets re-encoded by URL.Parse, %2f will get replaced with '/', +// breaking the namespace that we pass for group%2fproject. +// +// See https://github.com/golang/go/issues/6658 for more context +func encodePathSegments(path string) string { + parsed := strings.Split(path, "/") + + var encoded []string + for _, str := range parsed { + encoded = append(encoded, url.PathEscape(str)) + } + return strings.Join(encoded, "/") +} + +// BuildURL returns a pointer to a url.URL for where the request should be +// proxied to. The returned bool will indicate if there is some sort of issue +// with the url while it is being generated. +// +// The URL is generated from the host (which contains the top-level group and +// ends with the pagesDomain) and the path (which contains any subgroups, the +// project, a job ID and a path +// for the artifact file we want to download) +func (a *Artifact) BuildURL(host, requestPath string) (*url.URL, bool) { + if !strings.HasSuffix(strings.ToLower(host), a.suffix) { + return nil, false + } + + topGroup := host[0 : len(host)-len(a.suffix)] + + parts := pathExtractor.FindAllStringSubmatch(requestPath, 1) + if len(parts) != 1 || len(parts[0]) != 4 { + return nil, false + } + + restOfPath := strings.TrimLeft(strings.TrimRight(parts[0][1], "/"), "/") + if len(restOfPath) == 0 { + return nil, false + } + + jobID := parts[0][2] + artifactPath := encodePathSegments(parts[0][3]) + + projectID := url.PathEscape(path.Join(topGroup, restOfPath)) + generated := fmt.Sprintf(apiURLTemplate, a.server, projectID, jobID, artifactPath) + + u, err := url.Parse(generated) + if err != nil { + return nil, false + } + return u, true +} diff --git a/internal/artifact/artifact_test.go b/internal/artifact/artifact_test.go new file mode 100644 index 000000000..ab25d16f0 --- /dev/null +++ b/internal/artifact/artifact_test.go @@ -0,0 +1,277 @@ +package artifact_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/artifact" +) + +func TestTryMakeRequest(t *testing.T) { + content := "Title of the document" + contentType := "text/html; charset=utf-8" + testServer := makeArtifactServerStub(t, content, contentType) + defer testServer.Close() + + cases := []struct { + Path string + Token string + Status int + Content string + Length string + CacheControl string + ContentType string + Description string + }{ + { + "/200.html", + "", + http.StatusOK, + content, + "90", + "max-age=3600", + "text/html; charset=utf-8", + "basic successful request", + }, + { + "/200.html", + "token", + http.StatusOK, + content, + "90", + "", + "text/html; charset=utf-8", + "basic successful request", + }, + { + "/max-caching.html", + "", + http.StatusIMUsed, + content, + "90", + "max-age=3600", + "text/html; charset=utf-8", + "max caching request", + }, + { + "/non-caching.html", + "", + http.StatusTeapot, + content, + "90", + "", + "text/html; charset=utf-8", + "no caching request", + }, + } + + for _, c := range cases { + t.Run(c.Description, func(t *testing.T) { + result := httptest.NewRecorder() + reqURL, err := url.Parse("/-/subgroup/project/-/jobs/1/artifacts" + c.Path) + require.NoError(t, err) + r := &http.Request{URL: reqURL} + art := artifact.New(testServer.URL, 1, "gitlab-example.io") + + require.True(t, art.TryMakeRequest("group.gitlab-example.io", result, r, c.Token, func(resp *http.Response) bool { return false })) + require.Equal(t, c.Status, result.Code) + require.Equal(t, c.ContentType, result.Header().Get("Content-Type")) + require.Equal(t, c.Length, result.Header().Get("Content-Length")) + require.Equal(t, c.CacheControl, result.Header().Get("Cache-Control")) + require.Equal(t, c.Content, result.Body.String()) + }) + } +} + +// provide stub for testing different artifact responses +func makeArtifactServerStub(t *testing.T, content string, contentType string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", contentType) + switch r.URL.RawPath { + case "/projects/group%2Fsubgroup%2Fproject/jobs/1/artifacts/200.html": + w.WriteHeader(http.StatusOK) + case "/projects/group%2Fsubgroup%2Fproject/jobs/1/artifacts/max-caching.html": + w.WriteHeader(http.StatusIMUsed) + case "/projects/group%2Fsubgroup%2Fproject/jobs/1/artifacts/non-caching.html": + w.WriteHeader(http.StatusTeapot) + case "/projects/group%2Fsubgroup%2Fproject/jobs/1/artifacts/500.html": + w.WriteHeader(http.StatusInternalServerError) + case "/projects/group%2Fsubgroup%2Fgroup%2Fproject/jobs/1/artifacts/404.html": + w.WriteHeader(http.StatusNotFound) + default: + t.Log("Surprising r.URL.RawPath", r.URL.RawPath) + w.WriteHeader(999) + } + fmt.Fprint(w, content) + })) +} + +func TestBuildURL(t *testing.T) { + cases := []struct { + RawServer string + Host string + Path string + Expected string + PagesDomain string + Ok bool + Description string + }{ + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/project/-/jobs/1/artifacts/", + `https://gitlab.com/api/v4/projects/group%2Fproject/jobs/1/artifacts/`, + "gitlab.io", + true, + "Basic case", + }, + { + "https://gitlab.com/api/v4/", + "group.gitlab.io", + "/-/project/-/jobs/1/artifacts/", + "https://gitlab.com/api/v4/projects/group%2Fproject/jobs/1/artifacts/", + "gitlab.io", + true, + "API URL has trailing slash", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/project/-/jobs/1/artifacts/path/to/file {!#1.txt", + "https://gitlab.com/api/v4/projects/group%2Fproject/jobs/1/artifacts/path/to/file%20%7B%21%231.txt", + "gitlab.io", + true, + "Special characters in name", + }, + { + "https://gitlab.com/api/v4/", + "GROUP.GITLAB.IO", + "/-/SUBGROUP/PROJECT/-/JOBS/1/ARTIFACTS/PATH/TO/FILE.txt", + "https://gitlab.com/api/v4/projects/GROUP%2FSUBGROUP%2FPROJECT/jobs/1/artifacts/PATH/TO/FILE.txt", + "gitlab.io", + true, + "Uppercase names", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/project/-/jobs/1foo1/artifacts/", + "", + "gitlab.io", + false, + "Job ID has letters", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/project/-/jobs/1$1/artifacts/", + "", + "gitlab.io", + false, + "Job ID has special characters", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/project/-/jobs/1/artifacts/path/to/file.txt", + "https://gitlab.com/api/v4/projects/group%2Fproject/jobs/1/artifacts/path/to/file.txt", + "gitlab.io", + true, + "Artifact in subdirectory", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/subgroup1/sub.group2/project/-/jobs/1/artifacts/path/to/file.txt", + "https://gitlab.com/api/v4/projects/group%2Fsubgroup1%2Fsub.group2%2Fproject/jobs/1/artifacts/path/to/file.txt", + "gitlab.io", + true, + "Basic subgroup case", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-//project/-/jobs/1/artifacts/", + "https://gitlab.com/api/v4/projects/group%2Fproject/jobs/1/artifacts/", + "gitlab.io", + true, + "Leading / in remainder of project path", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/subgroup/project//-/jobs/1/artifacts/", + "https://gitlab.com/api/v4/projects/group%2Fsubgroup%2Fproject/jobs/1/artifacts/", + "gitlab.io", + true, + "Trailing / in remainder of project path", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-//subgroup/project//-/jobs/1/artifacts/", + "https://gitlab.com/api/v4/projects/group%2Fsubgroup%2Fproject/jobs/1/artifacts/", + "gitlab.io", + true, + "Leading and trailing /", + }, + { + "https://gitlab.com/api/v4", + "group.name.gitlab.io", + "/-/subgroup/project/-/jobs/1/artifacts/", + "https://gitlab.com/api/v4/projects/group.name%2Fsubgroup%2Fproject/jobs/1/artifacts/", + "gitlab.io", + true, + "Toplevel group has period", + }, + { + "https://gitlab.com/api/v4", + "gitlab.io.gitlab.io", + "/-/project/-/jobs/1/artifacts/", + "https://gitlab.com/api/v4/projects/gitlab.io%2Fproject/jobs/1/artifacts/", + "gitlab.io", + true, + "Toplevel group matches pages domain", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/-/project/-/jobs/1/artifacts", + "", + "gitlab.io", + false, + "No artifact specified", + }, + { + "https://gitlab.com/api/v4", + "group.gitlab.io", + "/index.html", + "", + "example.com", + false, + "non matching domain and request", + }, + } + + for _, c := range cases { + t.Run(c.Description, func(t *testing.T) { + a := artifact.New(c.RawServer, 1, c.PagesDomain) + u, ok := a.BuildURL(c.Host, c.Path) + + msg := c.Description + " - generated URL: " + if u != nil { + msg = msg + u.String() + } + + require.Equal(t, c.Ok, ok, msg) + if c.Ok { + require.Equal(t, c.Expected, u.String(), c.Description) + } + }) + } +} diff --git a/internal/auth/auth.go b/internal/auth/auth.go new file mode 100644 index 000000000..215290ba9 --- /dev/null +++ b/internal/auth/auth.go @@ -0,0 +1,672 @@ +package auth + +import ( + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/gorilla/securecookie" + "github.com/gorilla/sessions" + log "github.com/sirupsen/logrus" + "golang.org/x/crypto/hkdf" + + "gitlab.com/gitlab-org/labkit/errortracking" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httperrors" + "gitlab.com/gitlab-org/gitlab-pages/internal/httptransport" + "gitlab.com/gitlab-org/gitlab-pages/internal/request" + "gitlab.com/gitlab-org/gitlab-pages/internal/source" +) + +// nolint: gosec +// gosec: G101: Potential hardcoded credentials +// auth constants, not credentials +const ( + apiURLUserTemplate = "%s/api/v4/user" + apiURLProjectTemplate = "%s/api/v4/projects/%d/pages_access" + authorizeURLTemplate = "%s/oauth/authorize?client_id=%s&redirect_uri=%s&response_type=code&state=%s" + tokenURLTemplate = "%s/oauth/token" + tokenContentTemplate = "client_id=%s&client_secret=%s&code=%s&grant_type=authorization_code&redirect_uri=%s" + callbackPath = "/auth" + authorizeProxyTemplate = "%s?domain=%s&state=%s" + authSessionMaxAge = 60 * 10 // 10 minutes + + failAuthErrMsg = "failed to authenticate request" + fetchAccessTokenErrMsg = "fetching access token failed" + queryParameterErrMsg = "failed to parse domain query parameter" + saveSessionErrMsg = "failed to save the session" +) + +var ( + errResponseNotOk = errors.New("response was not ok") + errAuthNotConfigured = errors.New("authentication is not configured") + errGenerateKeys = errors.New("could not generate auth keys") +) + +// Auth handles authenticating users with GitLab API +type Auth struct { + pagesDomain string + clientID string + clientSecret string + redirectURI string + gitLabServer string + authSecret string + jwtSigningKey []byte + jwtExpiry time.Duration + apiClient *http.Client + store sessions.Store + now func() time.Time // allows to stub time.Now() easily in tests +} + +type tokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + RefreshToken string `json:"refresh_token"` +} + +type errorResponse struct { + Error string `json:"error"` + ErrorDescription string `json:"error_description"` +} +type domain interface { + GetProjectID(r *http.Request) uint64 + ServeNotFoundAuthFailed(w http.ResponseWriter, r *http.Request) +} + +func (a *Auth) getSessionFromStore(r *http.Request) (*sessions.Session, error) { + session, err := a.store.Get(r, "gitlab-pages") + + if session != nil { + // Cookie just for this domain + session.Options.Path = "/" + session.Options.HttpOnly = true + session.Options.Secure = request.IsHTTPS(r) + session.Options.MaxAge = authSessionMaxAge + } + + return session, err +} + +func (a *Auth) checkSession(w http.ResponseWriter, r *http.Request) (*sessions.Session, error) { + // Create or get session + session, errsession := a.getSessionFromStore(r) + + if errsession != nil { + // Save cookie again + errsave := session.Save(r, w) + if errsave != nil { + logRequest(r).WithError(errsave).Error(saveSessionErrMsg) + errortracking.Capture(errsave, errortracking.WithRequest(r)) + httperrors.Serve500(w) + return nil, errsave + } + + http.Redirect(w, r, getRequestAddress(r), 302) + return nil, errsession + } + + return session, nil +} + +// TryAuthenticate tries to authenticate user and fetch access token if request is a callback to /auth? +func (a *Auth) TryAuthenticate(w http.ResponseWriter, r *http.Request, domains source.Source) bool { + if a == nil { + return false + } + + session, err := a.checkSession(w, r) + if err != nil { + return true + } + + // Request is for auth + if r.URL.Path != callbackPath { + return false + } + + logRequest(r).Info("Receive OAuth authentication callback") + + if a.handleProxyingAuth(session, w, r, domains) { + return true + } + + // If callback is not successful + errorParam := r.URL.Query().Get("error") + if errorParam != "" { + logRequest(r).WithField("error", errorParam).Warn("OAuth endpoint returned error") + + httperrors.Serve401(w) + return true + } + + if verifyCodeAndStateGiven(r) { + a.checkAuthenticationResponse(session, w, r) + return true + } + + return false +} + +func (a *Auth) checkAuthenticationResponse(session *sessions.Session, w http.ResponseWriter, r *http.Request) { + if !validateState(r, session) { + // State is NOT ok + logRequest(r).Warn("Authentication state did not match expected") + + httperrors.Serve401(w) + return + } + + redirectURI, ok := session.Values["uri"].(string) + if !ok { + logRequest(r).Error("Can not extract redirect uri from session") + httperrors.Serve500(w) + return + } + + decryptedCode, err := a.DecryptCode(r.URL.Query().Get("code"), getRequestDomain(r)) + if err != nil { + logRequest(r).WithError(err).Error("failed to decrypt secure code") + errortracking.Capture(err, errortracking.WithRequest(r)) + httperrors.Serve500(w) + return + } + + // Fetch access token with authorization code + token, err := a.fetchAccessToken(decryptedCode) + if err != nil { + // Fetching token not OK + logRequest(r).WithError(err).WithField( + "redirect_uri", redirectURI, + ).Error(fetchAccessTokenErrMsg) + errortracking.Capture( + err, + errortracking.WithRequest(r), + errortracking.WithField("redirect_uri", redirectURI)) + + httperrors.Serve503(w) + return + } + + // Store access token + session.Values["access_token"] = token.AccessToken + err = session.Save(r, w) + if err != nil { + logRequest(r).WithError(err).Error(saveSessionErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + + httperrors.Serve500(w) + return + } + + // Redirect back to requested URI + logRequest(r).WithField( + "redirect_uri", redirectURI, + ).Info("Authentication was successful, redirecting user back to requested page") + + http.Redirect(w, r, redirectURI, 302) +} + +func (a *Auth) domainAllowed(name string, domains source.Source) bool { + isConfigured := (name == a.pagesDomain) || strings.HasSuffix("."+name, a.pagesDomain) + + if isConfigured { + return true + } + + domain, err := domains.GetDomain(name) + + // domain exists and there is no error + return (domain != nil && err == nil) +} + +func (a *Auth) handleProxyingAuth(session *sessions.Session, w http.ResponseWriter, r *http.Request, domains source.Source) bool { + // handle auth callback e.g. https://gitlab.io/auth?domain&domain&state=state + if shouldProxyAuthToGitlab(r) { + domain := r.URL.Query().Get("domain") + state := r.URL.Query().Get("state") + + proxyurl, err := url.Parse(domain) + if err != nil { + logRequest(r).WithField("domain", domain).Error(queryParameterErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r), errortracking.WithField("domain", domain)) + + httperrors.Serve500(w) + return true + } + host, _, err := net.SplitHostPort(proxyurl.Host) + if err != nil { + host = proxyurl.Host + } + + if !a.domainAllowed(host, domains) { + logRequest(r).WithField("domain", host).Warn("Domain is not configured") + httperrors.Serve401(w) + return true + } + + logRequest(r).WithField("domain", domain).Info("User is authenticating via domain") + + session.Values["proxy_auth_domain"] = domain + + err = session.Save(r, w) + if err != nil { + logRequest(r).WithError(err).Error(saveSessionErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + + httperrors.Serve500(w) + return true + } + + url := fmt.Sprintf(authorizeURLTemplate, a.gitLabServer, a.clientID, a.redirectURI, state) + + logRequest(r).WithFields(log.Fields{ + "gitlab_server": a.gitLabServer, + "pages_domain": domain, + }).Info("Redirecting user to gitlab for oauth") + + http.Redirect(w, r, url, 302) + + return true + } + + // If auth request callback should be proxied to custom domain + // redirect to originating domain set in the cookie as proxy_auth_domain + if shouldProxyCallbackToCustomDomain(r, session) { + // Get domain started auth process + proxyDomain := session.Values["proxy_auth_domain"].(string) + + logRequest(r).WithField("domain", proxyDomain).Info("Redirecting auth callback to custom domain") + + // Clear proxying from session + delete(session.Values, "proxy_auth_domain") + err := session.Save(r, w) + if err != nil { + logRequest(r).WithError(err).Error(saveSessionErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + + httperrors.Serve500(w) + return true + } + + query := r.URL.Query() + + // prevent https://tools.ietf.org/html/rfc6749#section-10.6 and + // https://gitlab.com/gitlab-org/gitlab-pages/-/issues/262 by encrypting + // and signing the OAuth code + signedCode, err := a.EncryptAndSignCode(proxyDomain, query.Get("code")) + if err != nil { + logRequest(r).WithError(err).Error(saveSessionErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + + httperrors.Serve503(w) + return true + } + + // prevent forwarding access token, more context on the security issue + // https://gitlab.com/gitlab-org/gitlab/-/issues/285244#note_451266051 + query.Del("token") + + // replace code with signed code + query.Set("code", signedCode) + + // Redirect pages to originating domain with code and state to finish + // authentication process + http.Redirect(w, r, proxyDomain+r.URL.Path+"?"+query.Encode(), 302) + return true + } + + return false +} + +func getRequestAddress(r *http.Request) string { + if request.IsHTTPS(r) { + return "https://" + r.Host + r.RequestURI + } + return "http://" + r.Host + r.RequestURI +} + +func getRequestDomain(r *http.Request) string { + if request.IsHTTPS(r) { + return "https://" + r.Host + } + return "http://" + r.Host +} + +func shouldProxyAuthToGitlab(r *http.Request) bool { + return r.URL.Query().Get("domain") != "" && r.URL.Query().Get("state") != "" +} + +func shouldProxyCallbackToCustomDomain(r *http.Request, session *sessions.Session) bool { + return session.Values["proxy_auth_domain"] != nil +} + +func validateState(r *http.Request, session *sessions.Session) bool { + state := r.URL.Query().Get("state") + if state == "" { + // No state param + return false + } + + // Check state + if session.Values["state"] == nil || session.Values["state"].(string) != state { + // State does not match + return false + } + + // State ok + return true +} + +func verifyCodeAndStateGiven(r *http.Request) bool { + return r.URL.Query().Get("code") != "" && r.URL.Query().Get("state") != "" +} + +func (a *Auth) fetchAccessToken(code string) (tokenResponse, error) { + token := tokenResponse{} + + // Prepare request + url := fmt.Sprintf(tokenURLTemplate, a.gitLabServer) + content := fmt.Sprintf(tokenContentTemplate, a.clientID, a.clientSecret, code, a.redirectURI) + req, err := http.NewRequest("POST", url, strings.NewReader(content)) + + if err != nil { + return token, err + } + + // Request token + resp, err := a.apiClient.Do(req) + + if err != nil { + return token, err + } + + if resp.StatusCode != 200 { + err = errResponseNotOk + errortracking.Capture(err, errortracking.WithRequest(req)) + return token, err + } + + // Parse response + defer resp.Body.Close() + err = json.NewDecoder(resp.Body).Decode(&token) + if err != nil { + return token, err + } + + return token, nil +} + +func (a *Auth) checkSessionIsValid(w http.ResponseWriter, r *http.Request) *sessions.Session { + session, err := a.checkSession(w, r) + if err != nil { + return nil + } + + // redirect to /auth?domain=%s&state=%s + if a.checkTokenExists(session, w, r) { + return nil + } + + return session +} + +func (a *Auth) checkTokenExists(session *sessions.Session, w http.ResponseWriter, r *http.Request) bool { + // If no access token redirect to OAuth login page + if session.Values["access_token"] == nil { + logRequest(r).Debug("No access token exists, redirecting user to OAuth2 login") + + // Generate state hash and store requested address + state := base64.URLEncoding.EncodeToString(securecookie.GenerateRandomKey(16)) + session.Values["state"] = state + session.Values["uri"] = getRequestAddress(r) + + // Clear possible proxying + delete(session.Values, "proxy_auth_domain") + + err := session.Save(r, w) + if err != nil { + logRequest(r).WithError(err).Error(saveSessionErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + + httperrors.Serve500(w) + return true + } + + // Because the pages domain might be in public suffix list, we have to + // redirect to pages domain to trigger authorization flow + http.Redirect(w, r, a.getProxyAddress(r, state), 302) + + return true + } + return false +} + +func (a *Auth) getProxyAddress(r *http.Request, state string) string { + return fmt.Sprintf(authorizeProxyTemplate, a.redirectURI, getRequestDomain(r), state) +} + +func destroySession(session *sessions.Session, w http.ResponseWriter, r *http.Request) { + logRequest(r).Debug("Destroying session") + + // Invalidate access token and redirect back for refreshing and re-authenticating + delete(session.Values, "access_token") + err := session.Save(r, w) + if err != nil { + logRequest(r).WithError(err).Error(saveSessionErrMsg) + errortracking.Capture(err, errortracking.WithRequest(r)) + + httperrors.Serve500(w) + return + } + + http.Redirect(w, r, getRequestAddress(r), 302) +} + +// IsAuthSupported checks if pages is running with the authentication support +func (a *Auth) IsAuthSupported() bool { + return a != nil +} + +func (a *Auth) checkAuthentication(w http.ResponseWriter, r *http.Request, domain domain) bool { + session := a.checkSessionIsValid(w, r) + if session == nil { + return true + } + + projectID := domain.GetProjectID(r) + // Access token exists, authorize request + var url string + if projectID > 0 { + url = fmt.Sprintf(apiURLProjectTemplate, a.gitLabServer, projectID) + } else { + url = fmt.Sprintf(apiURLUserTemplate, a.gitLabServer) + } + req, err := http.NewRequest("GET", url, nil) + + if err != nil { + logRequest(r).WithError(err).Error(failAuthErrMsg) + errortracking.Capture(err, errortracking.WithRequest(req)) + + httperrors.Serve500(w) + return true + } + + req.Header.Add("Authorization", "Bearer "+session.Values["access_token"].(string)) + resp, err := a.apiClient.Do(req) + + if err == nil && checkResponseForInvalidToken(resp, session, w, r) { + return true + } + + if err != nil || resp.StatusCode != 200 { + if err != nil { + logRequest(r).WithError(err).Error("Failed to retrieve info with token") + } + + // call serve404 handler when auth fails + domain.ServeNotFoundAuthFailed(w, r) + return true + } + + return false +} + +// CheckAuthenticationWithoutProject checks if user is authenticated and has a valid token +func (a *Auth) CheckAuthenticationWithoutProject(w http.ResponseWriter, r *http.Request, domain domain) bool { + if a == nil { + // No auth supported + return false + } + + return a.checkAuthentication(w, r, domain) +} + +// GetTokenIfExists returns the token if it exists +func (a *Auth) GetTokenIfExists(w http.ResponseWriter, r *http.Request) (string, error) { + if a == nil { + return "", nil + } + + session, err := a.checkSession(w, r) + if err != nil { + return "", errors.New("Error retrieving the session") + } + + if session.Values["access_token"] != nil { + return session.Values["access_token"].(string), nil + } + + return "", nil +} + +// RequireAuth will trigger authentication flow if no token exists +func (a *Auth) RequireAuth(w http.ResponseWriter, r *http.Request) bool { + return a.checkSessionIsValid(w, r) == nil +} + +// CheckAuthentication checks if user is authenticated and has access to the project +// will return contentServed = false when authFailed = true +func (a *Auth) CheckAuthentication(w http.ResponseWriter, r *http.Request, domain domain) bool { + logRequest(r).Debug("Authenticate request") + + if a == nil { + logRequest(r).Error(errAuthNotConfigured) + errortracking.Capture(errAuthNotConfigured, errortracking.WithRequest(r)) + + httperrors.Serve500(w) + return true + } + + return a.checkAuthentication(w, r, domain) +} + +// CheckResponseForInvalidToken checks response for invalid token and destroys session if it was invalid +func (a *Auth) CheckResponseForInvalidToken(w http.ResponseWriter, r *http.Request, + resp *http.Response) bool { + if a == nil { + // No auth supported + return false + } + + session, err := a.checkSession(w, r) + if err != nil { + return true + } + + if checkResponseForInvalidToken(resp, session, w, r) { + return true + } + + return false +} + +func checkResponseForInvalidToken(resp *http.Response, session *sessions.Session, w http.ResponseWriter, r *http.Request) bool { + if resp.StatusCode == http.StatusUnauthorized { + errResp := errorResponse{} + + // Parse response + defer resp.Body.Close() + err := json.NewDecoder(resp.Body).Decode(&errResp) + if err != nil { + errortracking.Capture(err) + return false + } + + if errResp.Error == "invalid_token" { + // Token is invalid + logRequest(r).Warn("Access token was invalid, destroying session") + + destroySession(session, w, r) + return true + } + } + + return false +} + +func logRequest(r *http.Request) *log.Entry { + state := r.URL.Query().Get("state") + return log.WithFields(log.Fields{ + "host": r.Host, + "path": r.URL.Path, + "state": state, + }) +} + +// generateKeys derives count hkdf keys from a secret, ensuring the key is +// the same for the same secret used across multiple instances +func generateKeys(secret string, count int) ([][]byte, error) { + keys := make([][]byte, count) + hkdfReader := hkdf.New(sha256.New, []byte(secret), []byte{}, []byte("PAGES_SIGNING_AND_ENCRYPTION_KEY")) + + for i := 0; i < count; i++ { + key := make([]byte, 32) + if _, err := io.ReadFull(hkdfReader, key); err != nil { + return nil, err + } + + keys[i] = key + } + + if len(keys) < count { + return nil, errGenerateKeys + } + + return keys, nil +} + +// New when authentication supported this will be used to create authentication handler +func New(pagesDomain string, storeSecret string, clientID string, clientSecret string, + redirectURI string, gitLabServer string) (*Auth, error) { + // generate 3 keys, 2 for the cookie store and 1 for JWT signing + keys, err := generateKeys(storeSecret, 3) + if err != nil { + return nil, err + } + + return &Auth{ + pagesDomain: pagesDomain, + clientID: clientID, + clientSecret: clientSecret, + redirectURI: redirectURI, + gitLabServer: strings.TrimRight(gitLabServer, "/"), + apiClient: &http.Client{ + Timeout: 5 * time.Second, + Transport: httptransport.InternalTransport, + }, + store: sessions.NewCookieStore(keys[0], keys[1]), + authSecret: storeSecret, + jwtSigningKey: keys[2], + jwtExpiry: time.Minute, + now: time.Now, + }, nil +} diff --git a/internal/auth/auth_code.go b/internal/auth/auth_code.go new file mode 100644 index 000000000..d2fea5a95 --- /dev/null +++ b/internal/auth/auth_code.go @@ -0,0 +1,147 @@ +package auth + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + + "github.com/dgrijalva/jwt-go" + "github.com/gorilla/securecookie" + "golang.org/x/crypto/hkdf" +) + +var ( + errInvalidToken = errors.New("invalid token") + errEmptyDomainOrCode = errors.New("empty domain or code") + errInvalidNonce = errors.New("invalid nonce") + errInvalidCode = errors.New("invalid code") +) + +// EncryptAndSignCode encrypts the OAuth code deriving the key from the domain. +// It adds the code and domain as JWT token claims and signs it using signingKey derived from +// the Auth secret. +func (a *Auth) EncryptAndSignCode(domain, code string) (string, error) { + if domain == "" || code == "" { + return "", errEmptyDomainOrCode + } + + nonce := base64.URLEncoding.EncodeToString(securecookie.GenerateRandomKey(16)) + + aesGcm, err := a.newAesGcmCipher(domain, nonce) + if err != nil { + return "", err + } + + // encrypt code with a randomly generated nonce + encryptedCode := aesGcm.Seal(nil, []byte(nonce), []byte(code), nil) + + // generate JWT token claims with encrypted code + claims := jwt.MapClaims{ + // standard claims + "iss": "gitlab-pages", + "iat": a.now().Unix(), + "exp": a.now().Add(a.jwtExpiry).Unix(), + // custom claims + "domain": domain, // pass the domain so we can validate the signed domain matches the requested domain + "code": hex.EncodeToString(encryptedCode), + "nonce": nonce, + } + + return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(a.jwtSigningKey) +} + +// DecryptCode decodes the secureCode as a JWT token and validates its signature. +// It then decrypts the code from the token claims and returns it. +func (a *Auth) DecryptCode(jwt, domain string) (string, error) { + claims, err := a.parseJWTClaims(jwt) + if err != nil { + return "", err + } + + // get nonce and encryptedCode from the JWT claims + nonce, ok := claims["nonce"].(string) + if !ok { + return "", errInvalidNonce + } + + encryptedCode, ok := claims["code"].(string) + if !ok { + return "", errInvalidCode + } + + cipherText, err := hex.DecodeString(encryptedCode) + if err != nil { + return "", err + } + + aesGcm, err := a.newAesGcmCipher(domain, nonce) + if err != nil { + return "", err + } + + decryptedCode, err := aesGcm.Open(nil, []byte(nonce), cipherText, nil) + if err != nil { + return "", err + } + + return string(decryptedCode), nil +} + +func (a *Auth) codeKey(domain string) ([]byte, error) { + hkdfReader := hkdf.New(sha256.New, []byte(a.authSecret), []byte(domain), []byte("PAGES_AUTH_CODE_ENCRYPTION_KEY")) + + key := make([]byte, 32) + if _, err := io.ReadFull(hkdfReader, key); err != nil { + return nil, err + } + + return key, nil +} + +func (a *Auth) parseJWTClaims(secureCode string) (jwt.MapClaims, error) { + token, err := jwt.Parse(secureCode, a.getSigningKey) + if err != nil { + return nil, err + } + + claims, ok := token.Claims.(jwt.MapClaims) + if !ok || !token.Valid { + return nil, errInvalidToken + } + + return claims, nil +} + +func (a *Auth) getSigningKey(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + + return a.jwtSigningKey, nil +} + +func (a *Auth) newAesGcmCipher(domain, nonce string) (cipher.AEAD, error) { + // get the same key for a domain + key, err := a.codeKey(domain) + if err != nil { + return nil, err + } + + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aesGcm, err := cipher.NewGCMWithNonceSize(block, len(nonce)) + if err != nil { + return nil, err + } + + return aesGcm, nil +} diff --git a/internal/auth/auth_code_test.go b/internal/auth/auth_code_test.go new file mode 100644 index 000000000..d54fcc7ea --- /dev/null +++ b/internal/auth/auth_code_test.go @@ -0,0 +1,99 @@ +package auth + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestEncryptAndDecryptSignedCode(t *testing.T) { + auth := createTestAuth(t, "") + + tests := map[string]struct { + auth *Auth + encDomain string + code string + expectedEncErrMsg string + decDomain string + expectedDecErrMsg string + }{ + "happy_path": { + auth: auth, + encDomain: "domain", + decDomain: "domain", + code: "code", + }, + "empty_domain": { + auth: auth, + encDomain: "", + code: "code", + expectedEncErrMsg: "empty domain or code", + }, + "empty_code": { + auth: auth, + encDomain: "domain", + code: "", + expectedEncErrMsg: "empty domain or code", + }, + "different_dec_domain": { + auth: auth, + encDomain: "domain", + decDomain: "another", + code: "code", + expectedDecErrMsg: "cipher: message authentication failed", + }, + "expired_token": { + auth: func() *Auth { + newAuth := *auth + newAuth.jwtExpiry = time.Nanosecond + newAuth.now = func() time.Time { + return time.Time{} + } + + return &newAuth + }(), + encDomain: "domain", + code: "code", + decDomain: "domain", + expectedDecErrMsg: "Token is expired", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + encCode, err := test.auth.EncryptAndSignCode(test.encDomain, test.code) + if test.expectedEncErrMsg != "" { + require.EqualError(t, err, test.expectedEncErrMsg) + require.Empty(t, encCode) + return + } + + require.NoError(t, err) + require.NotEmpty(t, encCode) + + decCode, err := test.auth.DecryptCode(encCode, test.decDomain) + if test.expectedDecErrMsg != "" { + require.EqualError(t, err, test.expectedDecErrMsg) + require.Empty(t, decCode) + return + } + + require.NoError(t, err) + require.Equal(t, test.code, decCode) + }) + } +} + +func TestDecryptCodeWithInvalidJWT(t *testing.T) { + auth1 := createTestAuth(t, "") + auth2 := createTestAuth(t, "") + auth2.jwtSigningKey = []byte("another signing key") + + encCode, err := auth1.EncryptAndSignCode("domain", "code") + require.NoError(t, err) + + decCode, err := auth2.DecryptCode(encCode, "domain") + require.EqualError(t, err, "signature is invalid") + require.Empty(t, decCode) +} diff --git a/internal/auth/auth_test.go b/internal/auth/auth_test.go new file mode 100644 index 000000000..ce7d83207 --- /dev/null +++ b/internal/auth/auth_test.go @@ -0,0 +1,468 @@ +package auth + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/gorilla/sessions" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/request" + "gitlab.com/gitlab-org/gitlab-pages/internal/source" +) + +func createTestAuth(t *testing.T, url string) *Auth { + t.Helper() + + a, err := New("pages.gitlab-example.com", + "something-very-secret", + "id", + "secret", + "http://pages.gitlab-example.com/auth", + url) + + require.NoError(t, err) + + return a +} + +type domainMock struct { + projectID uint64 + notFoundContent string +} + +func (dm *domainMock) GetProjectID(r *http.Request) uint64 { + return dm.projectID +} + +func (dm *domainMock) ServeNotFoundAuthFailed(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + w.Write([]byte(dm.notFoundContent)) +} + +// Gorilla's sessions use request context to save session +// Which makes session sharable between test code and actually manipulating session +// Which leads to negative side effects: we can't test encryption, and cookie params +// like max-age and secure are not being properly set +// To avoid that we use fake request, and set only session cookie without copying context +func setSessionValues(t *testing.T, r *http.Request, store sessions.Store, values map[interface{}]interface{}) { + t.Helper() + + tmpRequest, err := http.NewRequest("GET", "/", nil) + require.NoError(t, err) + + result := httptest.NewRecorder() + + session, _ := store.Get(tmpRequest, "gitlab-pages") + session.Values = values + session.Save(tmpRequest, result) + + for _, cookie := range result.Result().Cookies() { + r.AddCookie(cookie) + } +} + +func TestTryAuthenticate(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/something/else") + require.NoError(t, err) + reqURL.Scheme = request.SchemeHTTPS + r := &http.Request{URL: reqURL} + + require.Equal(t, false, auth.TryAuthenticate(result, r, source.NewMockSource())) +} + +func TestTryAuthenticateWithError(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/auth?error=access_denied") + require.NoError(t, err) + + reqURL.Scheme = request.SchemeHTTPS + r := &http.Request{URL: reqURL} + + require.Equal(t, true, auth.TryAuthenticate(result, r, source.NewMockSource())) + require.Equal(t, 401, result.Code) +} + +func TestTryAuthenticateWithCodeButInvalidState(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/auth?code=1&state=invalid") + require.NoError(t, err) + reqURL.Scheme = request.SchemeHTTPS + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["state"] = "state" + session.Save(r, result) + + require.Equal(t, true, auth.TryAuthenticate(result, r, source.NewMockSource())) + require.Equal(t, 401, result.Code) +} + +func TestTryAuthenticateRemoveTokenFromRedirect(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/auth?code=1&state=state&token=secret") + require.NoError(t, err) + + require.Equal(t, reqURL.Query().Get("token"), "secret", "token is present before redirecting") + reqURL.Scheme = request.SchemeHTTPS + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["state"] = "state" + session.Values["proxy_auth_domain"] = "https://domain.com" + session.Save(r, result) + + require.Equal(t, true, auth.TryAuthenticate(result, r, source.NewMockSource())) + require.Equal(t, http.StatusFound, result.Code) + + redirect, err := url.Parse(result.Header().Get("Location")) + require.NoError(t, err) + + require.Empty(t, redirect.Query().Get("token"), "token is gone after redirecting") +} + +func testTryAuthenticateWithCodeAndState(t *testing.T, https bool) { + t.Helper() + + apiServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + require.Equal(t, "POST", r.Method) + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, "{\"access_token\":\"abc\"}") + case "/api/v4/projects/1000/pages_access": + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusOK) + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.Path) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + } + })) + + apiServer.Start() + defer apiServer.Close() + + auth := createTestAuth(t, apiServer.URL) + + domain := apiServer.URL + if https { + domain = strings.Replace(apiServer.URL, "http://", "https://", -1) + } + + code, err := auth.EncryptAndSignCode(domain, "1") + require.NoError(t, err) + + r, err := http.NewRequest("GET", "/auth?code="+code+"&state=state", nil) + require.NoError(t, err) + if https { + r.URL.Scheme = request.SchemeHTTPS + } else { + r.URL.Scheme = request.SchemeHTTP + } + + r.Host = strings.TrimPrefix(apiServer.URL, "http://") + + setSessionValues(t, r, auth.store, map[interface{}]interface{}{ + "uri": "https://pages.gitlab-example.com/project/", + "state": "state", + }) + + result := httptest.NewRecorder() + require.Equal(t, true, auth.TryAuthenticate(result, r, source.NewMockSource())) + require.Equal(t, http.StatusFound, result.Code) + require.Equal(t, "https://pages.gitlab-example.com/project/", result.Header().Get("Location")) + require.Equal(t, 600, result.Result().Cookies()[0].MaxAge) + require.Equal(t, https, result.Result().Cookies()[0].Secure) +} + +func TestTryAuthenticateWithCodeAndStateOverHTTP(t *testing.T) { + testTryAuthenticateWithCodeAndState(t, false) +} + +func TestTryAuthenticateWithCodeAndStateOverHTTPS(t *testing.T) { + testTryAuthenticateWithCodeAndState(t, true) +} + +func TestCheckAuthenticationWhenAccess(t *testing.T) { + apiServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v4/projects/1000/pages_access": + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusOK) + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.Path) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + } + })) + + apiServer.Start() + defer apiServer.Close() + + auth := createTestAuth(t, apiServer.URL) + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/auth?code=1&state=state") + require.NoError(t, err) + reqURL.Scheme = request.SchemeHTTPS + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["access_token"] = "abc" + session.Save(r, result) + contentServed := auth.CheckAuthentication(result, r, &domainMock{projectID: 1000}) + require.False(t, contentServed) + + // notFoundContent wasn't served so the default response from CheckAuthentication should be 200 + require.Equal(t, 200, result.Code) +} + +func TestCheckAuthenticationWhenNoAccess(t *testing.T) { + apiServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v4/projects/1000/pages_access": + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusUnauthorized) + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.Path) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + } + })) + + apiServer.Start() + defer apiServer.Close() + + auth := createTestAuth(t, apiServer.URL) + + w := httptest.NewRecorder() + + reqURL, err := url.Parse("/auth?code=1&state=state") + require.NoError(t, err) + reqURL.Scheme = request.SchemeHTTPS + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["access_token"] = "abc" + session.Save(r, w) + + contentServed := auth.CheckAuthentication(w, r, &domainMock{projectID: 1000, notFoundContent: "Generic 404"}) + require.True(t, contentServed) + res := w.Result() + defer res.Body.Close() + + require.Equal(t, 404, res.StatusCode) + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + require.Equal(t, string(body), "Generic 404") +} + +func TestCheckAuthenticationWhenInvalidToken(t *testing.T) { + apiServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v4/projects/1000/pages_access": + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusUnauthorized) + fmt.Fprint(w, "{\"error\":\"invalid_token\"}") + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.Path) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + } + })) + + apiServer.Start() + defer apiServer.Close() + + auth := createTestAuth(t, apiServer.URL) + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/auth?code=1&state=state") + require.NoError(t, err) + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["access_token"] = "abc" + err = session.Save(r, result) + require.NoError(t, err) + + contentServed := auth.CheckAuthentication(result, r, &domainMock{projectID: 1000}) + require.True(t, contentServed) + require.Equal(t, 302, result.Code) +} + +func TestCheckAuthenticationWithoutProject(t *testing.T) { + apiServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v4/user": + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusOK) + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.Path) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + } + })) + + apiServer.Start() + defer apiServer.Close() + + auth := createTestAuth(t, apiServer.URL) + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/auth?code=1&state=state") + require.NoError(t, err) + reqURL.Scheme = request.SchemeHTTPS + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["access_token"] = "abc" + session.Save(r, result) + + contentServed := auth.CheckAuthenticationWithoutProject(result, r, &domainMock{projectID: 0}) + require.False(t, contentServed) + require.Equal(t, 200, result.Code) +} + +func TestCheckAuthenticationWithoutProjectWhenInvalidToken(t *testing.T) { + apiServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v4/user": + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusUnauthorized) + fmt.Fprint(w, "{\"error\":\"invalid_token\"}") + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.Path) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + } + })) + + apiServer.Start() + defer apiServer.Close() + + auth := createTestAuth(t, apiServer.URL) + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/auth?code=1&state=state") + require.NoError(t, err) + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["access_token"] = "abc" + session.Save(r, result) + + contentServed := auth.CheckAuthenticationWithoutProject(result, r, &domainMock{projectID: 0}) + require.True(t, contentServed) + require.Equal(t, 302, result.Code) +} + +func TestGenerateKeys(t *testing.T) { + keys, err := generateKeys("something-very-secret", 3) + require.NoError(t, err) + require.Len(t, keys, 3) + + require.NotEqual(t, fmt.Sprint(keys[0]), fmt.Sprint(keys[1])) + require.NotEqual(t, fmt.Sprint(keys[0]), fmt.Sprint(keys[2])) + require.NotEqual(t, fmt.Sprint(keys[1]), fmt.Sprint(keys[2])) + + require.Equal(t, len(keys[0]), 32) + require.Equal(t, len(keys[1]), 32) + require.Equal(t, len(keys[2]), 32) +} + +func TestGetTokenIfExistsWhenTokenExists(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/") + require.NoError(t, err) + r := &http.Request{URL: reqURL} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Values["access_token"] = "abc" + session.Save(r, result) + + token, err := auth.GetTokenIfExists(result, r) + require.NoError(t, err) + require.Equal(t, "abc", token) +} + +func TestGetTokenIfExistsWhenTokenDoesNotExist(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("http://pages.gitlab-example.com/test") + require.NoError(t, err) + r := &http.Request{URL: reqURL, Host: "pages.gitlab-example.com", RequestURI: "/test"} + + session, err := auth.store.Get(r, "gitlab-pages") + require.NoError(t, err) + + session.Save(r, result) + + token, err := auth.GetTokenIfExists(result, r) + require.Equal(t, "", token) + require.Equal(t, nil, err) +} + +func TestCheckResponseForInvalidTokenWhenInvalidToken(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("http://pages.gitlab-example.com/test") + require.NoError(t, err) + r := &http.Request{URL: reqURL, Host: "pages.gitlab-example.com", RequestURI: "/test"} + + resp := &http.Response{StatusCode: http.StatusUnauthorized, Body: ioutil.NopCloser(bytes.NewReader([]byte("{\"error\":\"invalid_token\"}")))} + + require.Equal(t, true, auth.CheckResponseForInvalidToken(result, r, resp)) + require.Equal(t, http.StatusFound, result.Result().StatusCode) + require.Equal(t, "http://pages.gitlab-example.com/test", result.Header().Get("Location")) +} + +func TestCheckResponseForInvalidTokenWhenNotInvalidToken(t *testing.T) { + auth := createTestAuth(t, "") + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/something") + require.NoError(t, err) + r := &http.Request{URL: reqURL} + + resp := &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader([]byte("ok")))} + + require.Equal(t, false, auth.CheckResponseForInvalidToken(result, r, resp)) +} diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 000000000..c52beef82 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,18 @@ +package config + +import ( + "time" +) + +type Config struct { + Zip *ZipServing +} + +// ZipServing stores all configuration values to be used by the zip VFS opening and +// caching +type ZipServing struct { + ExpirationInterval time.Duration + CleanupInterval time.Duration + RefreshInterval time.Duration + OpenTimeout time.Duration +} diff --git a/internal/domain/domain.go b/internal/domain/domain.go new file mode 100644 index 000000000..636b1bbd3 --- /dev/null +++ b/internal/domain/domain.go @@ -0,0 +1,215 @@ +package domain + +import ( + "context" + "crypto/tls" + "errors" + "net/http" + "sync" + + "gitlab.com/gitlab-org/labkit/errortracking" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httperrors" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" +) + +// ErrDomainDoesNotExist returned when a domain is not found or when a lookup path +// for a domain could not be resolved +var ErrDomainDoesNotExist = errors.New("domain does not exist") + +// Domain is a domain that gitlab-pages can serve. +type Domain struct { + Name string + CertificateCert string + CertificateKey string + + Resolver Resolver + + certificate *tls.Certificate + certificateError error + certificateOnce sync.Once +} + +// New creates a new domain with a resolver and existing certificates +func New(name, cert, key string, resolver Resolver) *Domain { + return &Domain{ + Name: name, + CertificateCert: cert, + CertificateKey: key, + Resolver: resolver, + } +} + +// String implements Stringer. +func (d *Domain) String() string { + return d.Name +} + +func (d *Domain) resolve(r *http.Request) (*serving.Request, error) { + if d == nil { + return nil, ErrDomainDoesNotExist + } + + return d.Resolver.Resolve(r) +} + +// GetLookupPath returns a project details based on the request. It returns nil +// if project does not exist. +func (d *Domain) GetLookupPath(r *http.Request) (*serving.LookupPath, error) { + servingReq, err := d.resolve(r) + if err != nil { + return nil, err + } + + return servingReq.LookupPath, nil +} + +// IsHTTPSOnly figures out if the request should be handled with HTTPS +// only by looking at group and project level config. +func (d *Domain) IsHTTPSOnly(r *http.Request) bool { + if lookupPath, _ := d.GetLookupPath(r); lookupPath != nil { + return lookupPath.IsHTTPSOnly + } + + return false +} + +// IsAccessControlEnabled figures out if the request is to a project that has access control enabled +func (d *Domain) IsAccessControlEnabled(r *http.Request) bool { + if lookupPath, _ := d.GetLookupPath(r); lookupPath != nil { + return lookupPath.HasAccessControl + } + + return false +} + +// IsNamespaceProject figures out if the request is to a namespace project +func (d *Domain) IsNamespaceProject(r *http.Request) bool { + if lookupPath, _ := d.GetLookupPath(r); lookupPath != nil { + return lookupPath.IsNamespaceProject + } + + return false +} + +// GetProjectID figures out what is the ID of the project user tries to access +func (d *Domain) GetProjectID(r *http.Request) uint64 { + if lookupPath, _ := d.GetLookupPath(r); lookupPath != nil { + return lookupPath.ProjectID + } + + return 0 +} + +// HasLookupPath figures out if the project exists that the user tries to access +func (d *Domain) HasLookupPath(r *http.Request) bool { + if d == nil { + return false + } + _, err := d.GetLookupPath(r) + + return err == nil +} + +// EnsureCertificate parses the PEM-encoded certificate for the domain +func (d *Domain) EnsureCertificate() (*tls.Certificate, error) { + if d == nil || len(d.CertificateKey) == 0 || len(d.CertificateCert) == 0 { + return nil, errors.New("tls certificates can be loaded only for pages with configuration") + } + + d.certificateOnce.Do(func() { + var cert tls.Certificate + cert, d.certificateError = tls.X509KeyPair( + []byte(d.CertificateCert), + []byte(d.CertificateKey), + ) + if d.certificateError == nil { + d.certificate = &cert + } + }) + + return d.certificate, d.certificateError +} + +// ServeFileHTTP returns true if something was served, false if not. +func (d *Domain) ServeFileHTTP(w http.ResponseWriter, r *http.Request) bool { + request, err := d.resolve(r) + if err != nil { + if errors.Is(err, ErrDomainDoesNotExist) { + // serve generic 404 + httperrors.Serve404(w) + return true + } + + errortracking.Capture(err, errortracking.WithRequest(r)) + httperrors.Serve503(w) + return true + } + + return request.ServeFileHTTP(w, r) +} + +// ServeNotFoundHTTP serves the not found pages from the projects. +func (d *Domain) ServeNotFoundHTTP(w http.ResponseWriter, r *http.Request) { + request, err := d.resolve(r) + if err != nil { + if errors.Is(err, ErrDomainDoesNotExist) { + // serve generic 404 + httperrors.Serve404(w) + return + } + + errortracking.Capture(err, errortracking.WithRequest(r)) + httperrors.Serve503(w) + return + } + + request.ServeNotFoundHTTP(w, r) +} + +// serveNamespaceNotFound will try to find a parent namespace domain for a request +// that failed authentication so that we serve the custom namespace error page for +// public namespace domains +func (d *Domain) serveNamespaceNotFound(w http.ResponseWriter, r *http.Request) { + // clone r and override the path and try to resolve the domain name + clonedReq := r.Clone(context.Background()) + clonedReq.URL.Path = "/" + + namespaceDomain, err := d.Resolver.Resolve(clonedReq) + if err != nil { + if errors.Is(err, ErrDomainDoesNotExist) { + // serve generic 404 + httperrors.Serve404(w) + return + } + + errortracking.Capture(err, errortracking.WithRequest(r)) + httperrors.Serve503(w) + return + } + + // for namespace domains that have no access control enabled + if !namespaceDomain.LookupPath.HasAccessControl { + namespaceDomain.ServeNotFoundHTTP(w, r) + return + } + + httperrors.Serve404(w) +} + +// ServeNotFoundAuthFailed handler to be called when auth failed so the correct custom +// 404 page is served. +func (d *Domain) ServeNotFoundAuthFailed(w http.ResponseWriter, r *http.Request) { + lookupPath, err := d.GetLookupPath(r) + if err != nil { + httperrors.Serve404(w) + return + } + + if d.IsNamespaceProject(r) && !lookupPath.HasAccessControl { + d.ServeNotFoundHTTP(w, r) + return + } + + d.serveNamespaceNotFound(w, r) +} diff --git a/internal/domain/domain_test.go b/internal/domain/domain_test.go new file mode 100644 index 000000000..f053001a4 --- /dev/null +++ b/internal/domain/domain_test.go @@ -0,0 +1,219 @@ +package domain + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/fixture" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/local" + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +type stubbedResolver struct { + project *serving.LookupPath + subpath string + err error +} + +func (resolver *stubbedResolver) Resolve(*http.Request) (*serving.Request, error) { + return &serving.Request{ + Serving: local.Instance(), + LookupPath: resolver.project, + SubPath: resolver.subpath, + }, resolver.err +} + +func serveFileOrNotFound(domain *Domain) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !domain.ServeFileHTTP(w, r) { + domain.ServeNotFoundHTTP(w, r) + } + } +} + +func TestIsHTTPSOnly(t *testing.T) { + tests := []struct { + name string + domain *Domain + url string + expected bool + }{ + { + name: "Custom domain with HTTPS-only enabled", + domain: New("custom-domain", "", "", + &stubbedResolver{ + project: &serving.LookupPath{ + Path: "group/project/public", + IsHTTPSOnly: true, + }, + }), + url: "http://custom-domain", + expected: true, + }, + { + name: "Custom domain with HTTPS-only disabled", + domain: New("custom-domain", "", "", + &stubbedResolver{ + project: &serving.LookupPath{ + Path: "group/project/public", + IsHTTPSOnly: false, + }, + }), + url: "http://custom-domain", + expected: false, + }, + { + name: "Unknown project", + domain: New("", "", "", &stubbedResolver{err: ErrDomainDoesNotExist}), + url: "http://test-domain/project", + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, _ := http.NewRequest(http.MethodGet, test.url, nil) + require.Equal(t, test.expected, test.domain.IsHTTPSOnly(req)) + }) + } +} + +func TestPredefined404ServeHTTP(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testDomain := New("", "", "", &stubbedResolver{err: ErrDomainDoesNotExist}) + + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testDomain), "GET", "http://group.test.io/not-existing-file", nil, "The page you're looking for could not be found") +} + +func TestGroupCertificate(t *testing.T) { + testGroup := &Domain{} + + tls, err := testGroup.EnsureCertificate() + require.Nil(t, tls) + require.Error(t, err) +} + +func TestDomainNoCertificate(t *testing.T) { + testDomain := &Domain{ + Name: "test.domain.com", + } + + tls, err := testDomain.EnsureCertificate() + require.Nil(t, tls) + require.Error(t, err) + + _, err2 := testDomain.EnsureCertificate() + require.Error(t, err) + require.Equal(t, err, err2) +} + +func BenchmarkEnsureCertificate(b *testing.B) { + for i := 0; i < b.N; i++ { + testDomain := &Domain{ + Name: "test.domain.com", + CertificateCert: fixture.Certificate, + CertificateKey: fixture.Key, + } + + testDomain.EnsureCertificate() + } +} + +var chdirSet = false + +func setUpTests(t testing.TB) func() { + t.Helper() + return testhelpers.ChdirInPath(t, "../../shared/pages", &chdirSet) +} + +func TestServeNamespaceNotFound(t *testing.T) { + defer setUpTests(t)() + + tests := []struct { + name string + domain string + path string + resolver *stubbedResolver + expectedResponse string + }{ + { + name: "public_namespace_domain", + domain: "group.404.gitlab-example.com", + path: "/unknown", + resolver: &stubbedResolver{ + project: &serving.LookupPath{ + Path: "group.404/group.404.gitlab-example.com/public", + IsNamespaceProject: true, + }, + subpath: "/unknown", + }, + expectedResponse: "Custom 404 group page", + }, + { + name: "private_project_under_public_namespace_domain", + domain: "group.404.gitlab-example.com", + path: "/private_project/unknown", + resolver: &stubbedResolver{ + project: &serving.LookupPath{ + Path: "group.404/group.404.gitlab-example.com/public", + IsNamespaceProject: true, + HasAccessControl: false, + }, + subpath: "/", + }, + expectedResponse: "Custom 404 group page", + }, + { + name: "private_namespace_domain", + domain: "group.404.gitlab-example.com", + path: "/unknown", + resolver: &stubbedResolver{ + project: &serving.LookupPath{ + Path: "group.404/group.404.gitlab-example.com/public", + IsNamespaceProject: true, + HasAccessControl: true, + }, + subpath: "/", + }, + expectedResponse: "The page you're looking for could not be found.", + }, + { + name: "no_parent_namespace_domain", + domain: "group.404.gitlab-example.com", + path: "/unknown", + resolver: &stubbedResolver{ + err: ErrDomainDoesNotExist, + subpath: "/", + }, + expectedResponse: "The page you're looking for could not be found.", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &Domain{ + Name: tt.domain, + Resolver: tt.resolver, + } + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", fmt.Sprintf("http://%s%s", tt.domain, tt.path), nil) + d.serveNamespaceNotFound(w, r) + + resp := w.Result() + defer resp.Body.Close() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Contains(t, string(body), tt.expectedResponse) + }) + } +} diff --git a/internal/domain/resolver.go b/internal/domain/resolver.go new file mode 100644 index 000000000..4b93baed5 --- /dev/null +++ b/internal/domain/resolver.go @@ -0,0 +1,14 @@ +package domain + +import ( + "net/http" + + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" +) + +// Resolver represents an interface responsible for resolving a pages serving +// request for each HTTP request +type Resolver interface { + // Resolve returns a serving request and an error if it occurred + Resolve(*http.Request) (*serving.Request, error) +} diff --git a/internal/fixture/fixtures.go b/internal/fixture/fixtures.go new file mode 100644 index 000000000..e425da1bf --- /dev/null +++ b/internal/fixture/fixtures.go @@ -0,0 +1,60 @@ +package fixture + +const ( + // Certificate is used for HTTPS tests + Certificate = `-----BEGIN CERTIFICATE----- +MIIDZDCCAkygAwIBAgIRAOtN9/zy+gFjdsgpKq3QRdQwDQYJKoZIhvcNAQELBQAw +MzEUMBIGA1UEChMLTG9nIENvdXJpZXIxGzAZBgNVBAMTEmdpdGxhYi1leGFtcGxl +LmNvbTAgFw0xODAzMjMxODMwMDZaGA8yMTE4MDIyNzE4MzAwNlowMzEUMBIGA1UE +ChMLTG9nIENvdXJpZXIxGzAZBgNVBAMTEmdpdGxhYi1leGFtcGxlLmNvbTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKULsxpnazXX5RsVzayrAQB+lWwr +Wef5L5eDhSsIsBLbelYp5YB4TmVRt5x7bWKOOJSBsOfwHZHKJXdu+uuX2RenZlhk +3Qpq9XGaPZjYm/NHi8gBHPAtz5sG5VaKNvkfTzRGnO9CWA9TM1XtYiOBq94dO+H3 +c+5jP5Yw+mJ+hA+i2058zF8nRlUHArEno2ofrHwE0LMZ11VskpXtWnVfs3voLs8p +r76KXPBFkMJR4qkWrMDF5Y5MbsQ0zisn6KXrTyV0S4MQh4vSyPdFHnEzvJ07rm5x +4RTWrjgQeQ2DjZjQvRmaDzlVBK9kaMkJ1Si3agK+gpji6d6WZ/Mb2el1GK8CAwEA +AaNxMG8wDgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud +EwEB/wQFMAMBAf8wNwYDVR0RBDAwLoIUKi5naXRsYWItZXhhbXBsZS5jb22HBH8A +AAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAJ0NM8apK0xI +YxMstP/dCQXtR0wyREGSD/eOpeY3bWlqCbpRgMFUGjQlrsEozcPZOCSCKX5p+tym +7GsnYtXkwbsuURoSz+5IlhRPVHcUlUeGRdv3/gCd8fDXiigALCsB6GrkMG5cUfh+ +x5p52AC3eQdWTDoxNou+2gzwkAl8iJc13Ykusst0YUqcsXKqTuei2quxFv0pEBSO +p8wEixoicLFNqPnIDmgx5894DAn0bccNXgRWtq8lLbdhGUlBbpatevvFMgNvFUbe +eeGb9D0EfpxmzxUl+L0xZtfg3f7cu5AgLG8tb6l4AK6NPVuXN8DmUgvnauWJjZME +fgStI+IRNVg= +-----END CERTIFICATE----- +` + + // Key is used for HTTPS tests + Key = `-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApQuzGmdrNdflGxXNrKsBAH6VbCtZ5/kvl4OFKwiwEtt6Vinl +gHhOZVG3nHttYo44lIGw5/Adkcold27665fZF6dmWGTdCmr1cZo9mNib80eLyAEc +8C3PmwblVoo2+R9PNEac70JYD1MzVe1iI4Gr3h074fdz7mM/ljD6Yn6ED6LbTnzM +XydGVQcCsSejah+sfATQsxnXVWySle1adV+ze+guzymvvopc8EWQwlHiqRaswMXl +jkxuxDTOKyfopetPJXRLgxCHi9LI90UecTO8nTuubnHhFNauOBB5DYONmNC9GZoP +OVUEr2RoyQnVKLdqAr6CmOLp3pZn8xvZ6XUYrwIDAQABAoIBAHhP5QnUZeTkMtDh +vgKmzZ4sqIQnvexKTBUo/MR4GtJESBPTisdx68QUI8LgfsafYkNvnyQUd5m1QEam +Eif3k3uYvhSlwjQ78BwWEdz/2f8oIo9zsEKtQm+CQWAqdRR5bGVxLCmFtWfGgN+c +ojO77SuHKAX7OvmGQ+4aWgu+qkoyg/chIpPXMduAjLMtN3eg60ZqJ5KrKuIF63Bb +xkPQvzJueB9SfUurmKjUltDMx6G/9RZyS0OIRGyL9Qp8MZ8jE23cXOcDgm0HhkPq +W4LU++aWAOLYziTjnhjJ+4Iz9R7U8sCmk1wgnK/tapVcJf41R98WuGluyjXpsXgA +k7vmofECgYEAzuGun9lZ7xGwPifp6vGanWXnW+JiZgCTGuHWgQLIXWYcLfoI3kpH +eLBYINBwvjIQ7P6UxsGSSXd+T82t+8W2LLc2fiKFKE1LVySpH99+cfmIPXxrviOz +GBX9LTdSCdGkgb54m8aJCpNFnKw5wYgcW1L8CaXXly2Z/KNrGR9R/YUCgYEAzDs4 +19HqlutGLTC30/ziiiIfDaBbX9AzBdUfp9GdT53Mi/7bfxpW/sL4RjG2fGgmN6ua +fh5npT9AB1ldcEg2qfyOJPt1Ubdi6ek9lx8AB2RMhwdihgX+7bjVMFtjg4b8z5C1 +jQbEr1rhFdpaGyNehtAXDgCbDWQBYnBrmM0rCaMCgYBip1Qyfd9ZFcJJoZb2pofo +jvOo6Weq5JNBungjxUPu5gaCFj2sYxd6Af3EiCF7UTypBy3DKgOsbQMa4yYYbcvV +vviJZcTB1zoaMC1GObl+eFPzniVy4mtBDRtSOJMyg3pDNKUnA6HOHTSQ5cAU/ecn +1YbCwwbv3JsV0of7zue2UQKBgQCVc0j3dd9rLSQfcaUz9bx5RNrgh9YV2S9dN0aA +8f1iA6FpWMiazFWY/GfeRga6JyTAXE0juXAzFoPuXNDpl46Y+f2yxmhlsgMqFMpD +SiYlQppVvWu1k7GnmDg5uMarux5JbiXM24UWpTRNX4nMjidgE+qrDnpoZCQ3Ovkh +yhGSbQKBgD3VEnPiSUmXBo39kPcnPg93E3JfdAOiOwIB2qwfYzg9kpmuTWws+DFz +lKpMI27YkmnPqROQ2NTUfdxYmw3EHHMAsvnmHeMNGn3ijSUZVKmPfV436Qc8iVci +s4wKoCRhBUZ52sHki/ieb+5hycT3JnVXMDtbJxgXFW5a86usXEpO +-----END RSA PRIVATE KEY-----` + + // GitLabAPISecretKey used in tests + // 32 bytes, base64 encoded + GitLabAPISecretKey = "e41rcFh7XBA7sNABWVCe2AZvxMsy6QDtJ8S9Ql1UiN8=" +) diff --git a/internal/handlers/handlers.go b/internal/handlers/handlers.go new file mode 100644 index 000000000..fb47fc55b --- /dev/null +++ b/internal/handlers/handlers.go @@ -0,0 +1,68 @@ +package handlers + +import ( + "net/http" + + "gitlab.com/gitlab-org/gitlab-pages/internal" + "gitlab.com/gitlab-org/gitlab-pages/internal/logging" +) + +// Handlers take care of handling specific requests +type Handlers struct { + Auth internal.Auth + Artifact internal.Artifact +} + +// New when provided the arguments defined herein, returns a pointer to an +// Handlers that is used to handle requests. +func New(auth internal.Auth, artifact internal.Artifact) *Handlers { + return &Handlers{ + Auth: auth, + Artifact: artifact, + } +} + +func (a *Handlers) checkIfLoginRequiredOrInvalidToken(w http.ResponseWriter, r *http.Request, token string) func(*http.Response) bool { + return func(resp *http.Response) bool { + if resp.StatusCode == http.StatusNotFound { + if token == "" { + if !a.Auth.IsAuthSupported() { + // Auth is not supported, probably means no access or does not exist but we cannot try with auth + return false + } + + logging.LogRequest(r).Debug("Artifact API response was 404 without token, try with authentication") + + // Authenticate user + if a.Auth.RequireAuth(w, r) { + return true + } + } else { + logging.LogRequest(r).Debug("Artifact API response was 404 with authentication") + } + } + + if a.Auth.CheckResponseForInvalidToken(w, r, resp) { + return true + } + + return false + } +} + +// HandleArtifactRequest handles all artifact related requests, will return true if request was handled here +func (a *Handlers) HandleArtifactRequest(host string, w http.ResponseWriter, r *http.Request) bool { + // In the event a host is prefixed with the artifact prefix an artifact + // value is created, and an attempt to proxy the request is made + + // Always try to add token to the request if it exists + token, err := a.Auth.GetTokenIfExists(w, r) + if err != nil { + return true + } + + // nolint: bodyclose + // a.checkIfLoginRequiredOrInvalidToken returns a response.Body, closing this body is responsibility + // of the TryMakeRequest implementation + return a.Artifact.TryMakeRequest(host, w, r, token, a.checkIfLoginRequiredOrInvalidToken(w, r, token)) +} diff --git a/internal/handlers/handlers_test.go b/internal/handlers/handlers_test.go new file mode 100644 index 000000000..aa664266b --- /dev/null +++ b/internal/handlers/handlers_test.go @@ -0,0 +1,161 @@ +package handlers + +import ( + "errors" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "gitlab.com/gitlab-org/gitlab-pages/internal/mocks" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestNotHandleArtifactRequestReturnsFalse(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockArtifact := mocks.NewMockArtifact(mockCtrl) + mockArtifact.EXPECT(). + TryMakeRequest(gomock.Any(), gomock.Any(), gomock.Any(), "", gomock.Any()). + Return(false). + Times(1) + + mockAuth := mocks.NewMockAuth(mockCtrl) + mockAuth.EXPECT(). + GetTokenIfExists(gomock.Any(), gomock.Any()). + Return("", nil). + Times(1) + + handlers := New(mockAuth, mockArtifact) + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/something") + require.NoError(t, err) + r := &http.Request{URL: reqURL} + + require.Equal(t, false, handlers.HandleArtifactRequest("host", result, r)) +} + +func TestHandleArtifactRequestedReturnsTrue(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockArtifact := mocks.NewMockArtifact(mockCtrl) + mockArtifact.EXPECT(). + TryMakeRequest(gomock.Any(), gomock.Any(), gomock.Any(), "", gomock.Any()). + Return(true). + Times(1) + + mockAuth := mocks.NewMockAuth(mockCtrl) + mockAuth.EXPECT(). + GetTokenIfExists(gomock.Any(), gomock.Any()). + Return("", nil). + Times(1) + + handlers := New(mockAuth, mockArtifact) + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/something") + require.NoError(t, err) + r := &http.Request{URL: reqURL} + + require.Equal(t, true, handlers.HandleArtifactRequest("host", result, r)) +} + +func TestNotFoundWithTokenIsNotHandled(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockAuth := mocks.NewMockAuth(mockCtrl) + mockAuth.EXPECT().CheckResponseForInvalidToken(gomock.Any(), gomock.Any(), gomock.Any()). + Return(false) + + handlers := New(mockAuth, nil) + + w := httptest.NewRecorder() + reqURL, _ := url.Parse("/") + r := &http.Request{URL: reqURL} + response := &http.Response{StatusCode: http.StatusNotFound} + handled := handlers.checkIfLoginRequiredOrInvalidToken(w, r, "token")(response) + + require.False(t, handled) +} + +func TestNotFoundWithoutTokenIsNotHandledWhenNotAuthSupport(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockAuth := mocks.NewMockAuth(mockCtrl) + mockAuth.EXPECT().IsAuthSupported().Return(false) + + handlers := New(mockAuth, nil) + + w := httptest.NewRecorder() + reqURL, _ := url.Parse("/") + r := &http.Request{URL: reqURL} + response := &http.Response{StatusCode: http.StatusNotFound} + handled := handlers.checkIfLoginRequiredOrInvalidToken(w, r, "")(response) + + require.False(t, handled) +} +func TestNotFoundWithoutTokenIsHandled(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockAuth := mocks.NewMockAuth(mockCtrl) + mockAuth.EXPECT().IsAuthSupported().Return(true) + mockAuth.EXPECT().RequireAuth(gomock.Any(), gomock.Any()).Times(1).Return(true) + + handlers := New(mockAuth, nil) + + w := httptest.NewRecorder() + reqURL, _ := url.Parse("/") + r := &http.Request{URL: reqURL} + response := &http.Response{StatusCode: http.StatusNotFound} + handled := handlers.checkIfLoginRequiredOrInvalidToken(w, r, "")(response) + + require.True(t, handled) +} +func TestInvalidTokenResponseIsHandled(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockAuth := mocks.NewMockAuth(mockCtrl) + mockAuth.EXPECT().CheckResponseForInvalidToken(gomock.Any(), gomock.Any(), gomock.Any()). + Return(true) + + handlers := New(mockAuth, nil) + + w := httptest.NewRecorder() + reqURL, _ := url.Parse("/") + r := &http.Request{URL: reqURL} + response := &http.Response{StatusCode: http.StatusUnauthorized} + handled := handlers.checkIfLoginRequiredOrInvalidToken(w, r, "token")(response) + + require.True(t, handled) +} + +func TestHandleArtifactRequestButGetTokenFails(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockArtifact := mocks.NewMockArtifact(mockCtrl) + mockArtifact.EXPECT(). + TryMakeRequest(gomock.Any(), gomock.Any(), gomock.Any(), "", gomock.Any()). + Times(0) + + mockAuth := mocks.NewMockAuth(mockCtrl) + mockAuth.EXPECT().GetTokenIfExists(gomock.Any(), gomock.Any()).Return("", errors.New("error when retrieving token")) + + handlers := New(mockAuth, mockArtifact) + + result := httptest.NewRecorder() + reqURL, err := url.Parse("/something") + require.NoError(t, err) + r := &http.Request{URL: reqURL} + + require.Equal(t, true, handlers.HandleArtifactRequest("host", result, r)) +} diff --git a/internal/host/host.go b/internal/host/host.go new file mode 100644 index 000000000..dd9b1c8a2 --- /dev/null +++ b/internal/host/host.go @@ -0,0 +1,23 @@ +package host + +import ( + "net" + "net/http" + "strings" +) + +// FromString tries to split host port from string, returns host or initial string if fail +func FromString(s string) string { + host := strings.ToLower(s) + + if splitHost, _, err := net.SplitHostPort(host); err == nil { + host = splitHost + } + + return host +} + +// FromRequest tries to split host port from r.Host, returns host or initial string if fail +func FromRequest(r *http.Request) string { + return FromString(r.Host) +} diff --git a/internal/host/host_test.go b/internal/host/host_test.go new file mode 100644 index 000000000..7168eb1b4 --- /dev/null +++ b/internal/host/host_test.go @@ -0,0 +1,18 @@ +package host + +import ( + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFromString(t *testing.T) { + require.Equal(t, "example.com", FromString("example.com")) + require.Equal(t, "example.com", FromString("eXAmpLe.com")) + require.Equal(t, "example.com", FromString("example.com:8080")) +} + +func TestFromRequest(t *testing.T) { + require.Equal(t, "example.com", FromRequest(httptest.NewRequest("GET", "example.com:8080/123", nil))) +} diff --git a/internal/httperrors/httperrors.go b/internal/httperrors/httperrors.go new file mode 100644 index 000000000..476d270c8 --- /dev/null +++ b/internal/httperrors/httperrors.go @@ -0,0 +1,202 @@ +package httperrors + +import ( + "fmt" + "net/http" + + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/labkit/errortracking" +) + +type content struct { + status int + title string + statusString string + header string + subHeader string +} + +var ( + content401 = content{ + http.StatusUnauthorized, + "Unauthorized (401)", + "401", + "You don't have permission to access the resource.", + `

The resource that you are attempting to access is protected and you don't have the necessary permissions to view it.

`, + } + content404 = content{ + http.StatusNotFound, + "The page you're looking for could not be found (404)", + "404", + "The page you're looking for could not be found.", + `

The resource that you are attempting to access does not exist or you don't have the necessary permissions to view it.

+

Make sure the address is correct and that the page hasn't moved.

+

Please contact your GitLab administrator if you think this is a mistake.

`, + } + content500 = content{ + http.StatusInternalServerError, + "Something went wrong (500)", + "500", + "Whoops, something went wrong on our end.", + `

Try refreshing the page, or going back and attempting the action again.

+

Please contact your GitLab administrator if this problem persists.

`, + } + + content502 = content{ + http.StatusBadGateway, + "Something went wrong (502)", + "502", + "Whoops, something went wrong on our end.", + `

Try refreshing the page, or going back and attempting the action again.

+

Please contact your GitLab administrator if this problem persists.

`, + } + + content503 = content{ + http.StatusServiceUnavailable, + "Service Unavailable (503)", + "503", + "Whoops, something went wrong on our end.", + `

Try refreshing the page, or going back and attempting the action again.

+

Please contact your GitLab administrator if this problem persists.

`, + } +) + +const predefinedErrorPage = ` + + + + + %v + + + + + GitLab Logo +

+ %v +

+
+

%v

+
+ %v + Go back +
+ + + +` + +func generateErrorHTML(c content) string { + return fmt.Sprintf(predefinedErrorPage, c.title, c.statusString, c.header, c.subHeader) +} + +func serveErrorPage(w http.ResponseWriter, c content) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(c.status) + fmt.Fprintln(w, generateErrorHTML(c)) +} + +// Serve401 returns a 401 error response / HTML page to the http.ResponseWriter +func Serve401(w http.ResponseWriter) { + serveErrorPage(w, content401) +} + +// Serve404 returns a 404 error response / HTML page to the http.ResponseWriter +func Serve404(w http.ResponseWriter) { + serveErrorPage(w, content404) +} + +// Serve500 returns a 500 error response / HTML page to the http.ResponseWriter +func Serve500(w http.ResponseWriter) { + serveErrorPage(w, content500) +} + +// Serve500WithRequest returns a 500 error response / HTML page to the http.ResponseWriter +func Serve500WithRequest(w http.ResponseWriter, r *http.Request, reason string, err error) { + log.WithFields(log.Fields{ + "host": r.Host, + "path": r.URL.Path, + }).WithError(err).Error(reason) + errortracking.Capture(err, errortracking.WithRequest(r)) + serveErrorPage(w, content500) +} + +// Serve502 returns a 502 error response / HTML page to the http.ResponseWriter +func Serve502(w http.ResponseWriter) { + serveErrorPage(w, content502) +} + +// Serve503 returns a 503 error response / HTML page to the http.ResponseWriter +func Serve503(w http.ResponseWriter) { + serveErrorPage(w, content503) +} diff --git a/internal/httperrors/httperrors_test.go b/internal/httperrors/httperrors_test.go new file mode 100644 index 000000000..b003ce6f2 --- /dev/null +++ b/internal/httperrors/httperrors_test.go @@ -0,0 +1,117 @@ +package httperrors + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +// creates a new implementation of http.ResponseWriter that allows the +// casting of values in order to aid testing efforts. +type testResponseWriter struct { + status int + content string + http.ResponseWriter +} + +func newTestResponseWriter(w http.ResponseWriter) *testResponseWriter { + return &testResponseWriter{0, "", w} +} + +func (w *testResponseWriter) Status() int { + return w.status +} + +func (w *testResponseWriter) Content() string { + return w.content +} + +func (w *testResponseWriter) Header() http.Header { + return w.ResponseWriter.Header() +} + +func (w *testResponseWriter) Write(data []byte) (int, error) { + w.content = string(data) + return w.ResponseWriter.Write(data) +} + +func (w *testResponseWriter) WriteHeader(statusCode int) { + w.status = statusCode + w.ResponseWriter.WriteHeader(statusCode) +} + +var ( + testingContent = content{ + http.StatusNotFound, + "Title", + "533", + "Header test", + "subheader text", + } +) + +func TestGenerateemailHTML(t *testing.T) { + actual := generateErrorHTML(testingContent) + require.Contains(t, actual, testingContent.title) + require.Contains(t, actual, testingContent.statusString) + require.Contains(t, actual, testingContent.header) + require.Contains(t, actual, testingContent.subHeader) +} + +func TestServeErrorPage(t *testing.T) { + w := newTestResponseWriter(httptest.NewRecorder()) + serveErrorPage(w, testingContent) + require.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") + require.Equal(t, w.Header().Get("X-Content-Type-Options"), "nosniff") + require.Equal(t, w.Status(), testingContent.status) +} + +func TestServe401(t *testing.T) { + w := newTestResponseWriter(httptest.NewRecorder()) + Serve401(w) + require.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") + require.Equal(t, w.Header().Get("X-Content-Type-Options"), "nosniff") + require.Equal(t, w.Status(), content401.status) + require.Contains(t, w.Content(), content401.title) + require.Contains(t, w.Content(), content401.statusString) + require.Contains(t, w.Content(), content401.header) + require.Contains(t, w.Content(), content401.subHeader) +} + +func TestServe404(t *testing.T) { + w := newTestResponseWriter(httptest.NewRecorder()) + Serve404(w) + require.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") + require.Equal(t, w.Header().Get("X-Content-Type-Options"), "nosniff") + require.Equal(t, w.Status(), content404.status) + require.Contains(t, w.Content(), content404.title) + require.Contains(t, w.Content(), content404.statusString) + require.Contains(t, w.Content(), content404.header) + require.Contains(t, w.Content(), content404.subHeader) +} + +func TestServe500(t *testing.T) { + w := newTestResponseWriter(httptest.NewRecorder()) + Serve500(w) + require.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") + require.Equal(t, w.Header().Get("X-Content-Type-Options"), "nosniff") + require.Equal(t, w.Status(), content500.status) + require.Contains(t, w.Content(), content500.title) + require.Contains(t, w.Content(), content500.statusString) + require.Contains(t, w.Content(), content500.header) + require.Contains(t, w.Content(), content500.subHeader) +} + +func TestServe502(t *testing.T) { + w := newTestResponseWriter(httptest.NewRecorder()) + Serve502(w) + require.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") + require.Equal(t, w.Header().Get("X-Content-Type-Options"), "nosniff") + require.Equal(t, w.Status(), content502.status) + require.Contains(t, w.Content(), content502.title) + require.Contains(t, w.Content(), content502.statusString) + require.Contains(t, w.Content(), content502.header) + require.Contains(t, w.Content(), content502.subHeader) +} diff --git a/internal/httprange/http_ranged_reader.go b/internal/httprange/http_ranged_reader.go new file mode 100644 index 000000000..babff1aa7 --- /dev/null +++ b/internal/httprange/http_ranged_reader.go @@ -0,0 +1,63 @@ +package httprange + +import ( + "context" + "io" +) + +// RangedReader for a resource. +// Implements the io.ReaderAt interface that can be used with Go's archive/zip package. +type RangedReader struct { + Resource *Resource + cachedReader *Reader +} + +func (rr *RangedReader) cachedRead(buf []byte, off int64) (int, error) { + _, err := rr.cachedReader.Seek(off, io.SeekStart) + if err != nil { + return 0, err + } + + return io.ReadFull(rr.cachedReader, buf) +} + +func (rr *RangedReader) ephemeralRead(buf []byte, offset int64) (n int, err error) { + // we can use context.Background and rely on the Reader's httpClient timeout for ephemeral reads + reader := NewReader(context.Background(), rr.Resource, offset, int64(len(buf))) + defer reader.Close() + + return io.ReadFull(reader, buf) +} + +// SectionReader partitions a resource from `offset` with a specified `size` +func (rr *RangedReader) SectionReader(ctx context.Context, offset, size int64) *Reader { + return NewReader(ctx, rr.Resource, offset, size) +} + +// ReadAt reads from cachedReader if exists, otherwise fetches a new Resource first. +// Opens a resource and reads len(buf) bytes from offset into buf. +func (rr *RangedReader) ReadAt(buf []byte, offset int64) (n int, err error) { + if rr.cachedReader != nil { + return rr.cachedRead(buf, offset) + } + + return rr.ephemeralRead(buf, offset) +} + +// WithCachedReader creates a Reader and saves it to the RangedReader instance. +// It takes a readFunc that will Seek the contents from Reader. +func (rr *RangedReader) WithCachedReader(ctx context.Context, readFunc func()) { + rr.cachedReader = NewReader(ctx, rr.Resource, 0, rr.Resource.Size) + + defer func() { + rr.cachedReader.Close() + rr.cachedReader = nil + }() + + readFunc() +} + +// NewRangedReader creates a RangedReader object on a given resource +func NewRangedReader(resource *Resource) *RangedReader { + return &RangedReader{Resource: resource} +} diff --git a/internal/httprange/http_ranged_reader_test.go b/internal/httprange/http_ranged_reader_test.go new file mode 100644 index 000000000..b17d06b11 --- /dev/null +++ b/internal/httprange/http_ranged_reader_test.go @@ -0,0 +1,293 @@ +package httprange + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +const ( + testData = "1234567890abcdefghij0987654321" + testDataLen = len(testData) +) + +func TestSectionReader(t *testing.T) { + tests := map[string]struct { + sectionOffset int + sectionSize int + readSize int + expectedContent string + expectedErr error + }{ + "no_buffer_no_err": { + sectionOffset: 0, + sectionSize: testDataLen, + readSize: 0, + expectedContent: "", + expectedErr: nil, + }, + "offset_starts_at_size": { + sectionOffset: testDataLen, + sectionSize: 1, + readSize: 1, + expectedContent: "", + expectedErr: ErrInvalidRange, + }, + "read_all": { + sectionOffset: 0, + sectionSize: testDataLen, + readSize: testDataLen, + expectedContent: testData, + expectedErr: io.EOF, + }, + "read_first_half": { + sectionOffset: 0, + sectionSize: testDataLen / 2, + readSize: testDataLen / 2, + expectedContent: testData[:testDataLen/2], + expectedErr: io.EOF, + }, + "read_second_half": { + sectionOffset: testDataLen / 2, + sectionSize: testDataLen / 2, + readSize: testDataLen / 2, + expectedContent: testData[testDataLen/2:], + expectedErr: io.EOF, + }, + "read_15_bytes_with_offset": { + sectionOffset: 3, + sectionSize: testDataLen / 2, + readSize: testDataLen / 2, + expectedContent: testData[3 : 3+testDataLen/2], + expectedErr: io.EOF, + }, + "read_13_bytes_with_offset": { + sectionOffset: 10, + sectionSize: testDataLen/2 - 2, + readSize: testDataLen/2 - 2, + expectedContent: testData[10 : 10+testDataLen/2-2], + expectedErr: io.EOF, + }, + } + + testServer := newTestServer(t, nil) + defer testServer.Close() + + resource, err := NewResource(context.Background(), testServer.URL+"/resource") + require.NoError(t, err) + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + rr := NewRangedReader(resource) + s := rr.SectionReader(context.Background(), int64(tt.sectionOffset), int64(tt.sectionSize)) + defer s.Close() + + buf := make([]byte, tt.readSize) + n, err := s.Read(buf) + if tt.expectedErr != nil && err != io.EOF { + require.EqualError(t, err, tt.expectedErr.Error()) + return + } + + require.Equal(t, tt.expectedErr, err) + require.Equal(t, len(tt.expectedContent), n) + require.Equal(t, tt.expectedContent, string(buf[:n])) + }) + } +} + +func TestReadAt(t *testing.T) { + tests := map[string]struct { + sectionOffset int + readSize int + expectedContent string + expectedErr error + }{ + "no_buffer_no_err": { + sectionOffset: 0, + readSize: 0, + expectedContent: "", + expectedErr: nil, + }, + "offset_starts_at_size": { + sectionOffset: testDataLen, + readSize: 1, + expectedContent: "", + expectedErr: ErrInvalidRange, + }, + "read_at_end": { + sectionOffset: testDataLen, + readSize: 1, + expectedContent: "", + expectedErr: ErrInvalidRange, + }, + "read_all": { + sectionOffset: 0, + readSize: testDataLen, + expectedContent: testData, + expectedErr: nil, + }, + "read_first_half": { + sectionOffset: 0, + readSize: testDataLen / 2, + expectedContent: testData[:testDataLen/2], + expectedErr: nil, + }, + "read_second_half": { + sectionOffset: testDataLen / 2, + readSize: testDataLen / 2, + expectedContent: testData[testDataLen/2:], + expectedErr: nil, + }, + "read_15_bytes_with_offset": { + sectionOffset: 3, + readSize: testDataLen / 2, + expectedContent: testData[3 : 3+testDataLen/2], + expectedErr: nil, + }, + "read_13_bytes_with_offset": { + sectionOffset: 10, + readSize: testDataLen/2 - 2, + expectedContent: testData[10 : 10+testDataLen/2-2], + expectedErr: nil, + }, + } + + testServer := newTestServer(t, nil) + defer testServer.Close() + + resource, err := NewResource(context.Background(), testServer.URL+"/resource") + require.NoError(t, err) + + for name, tt := range tests { + rr := NewRangedReader(resource) + testFn := func(reader *RangedReader) func(t *testing.T) { + return func(t *testing.T) { + buf := make([]byte, tt.readSize) + + n, err := reader.ReadAt(buf, int64(tt.sectionOffset)) + if tt.expectedErr != nil { + require.EqualError(t, err, tt.expectedErr.Error()) + return + } + + require.NoError(t, err) + require.Equal(t, len(tt.expectedContent), n) + require.Equal(t, tt.expectedContent, string(buf[:n])) + } + } + + t.Run(name, func(t *testing.T) { + rr.WithCachedReader(context.Background(), func() { + t.Run("cachedReader", testFn(rr)) + }) + + t.Run("ephemeralReader", testFn(rr)) + }) + } +} + +func TestReadAtMultipart(t *testing.T) { + var counter int32 + + testServer := newTestServer(t, func() { + atomic.AddInt32(&counter, 1) + }) + defer testServer.Close() + + resource, err := NewResource(context.Background(), testServer.URL+"/resource") + require.NoError(t, err) + require.Equal(t, int32(1), counter) + + rr := NewRangedReader(resource) + + assertReadAtFunc := func(t *testing.T, bufLen, offset int, expectedDat string, expectedCounter int32) { + buf := make([]byte, bufLen) + n, err := rr.ReadAt(buf, int64(offset)) + require.NoError(t, err) + require.Equal(t, expectedCounter, counter) + + require.NoError(t, err) + require.Equal(t, bufLen, n) + require.Equal(t, expectedDat, string(buf)) + } + bufLen := testDataLen / 3 + + t.Run("ephemeralRead", func(t *testing.T) { + // "1234567890" + assertReadAtFunc(t, bufLen, 0, testData[:bufLen], 2) + // "abcdefghij" + assertReadAtFunc(t, bufLen, bufLen, testData[bufLen:2*bufLen], 3) + // "0987654321" + assertReadAtFunc(t, bufLen, 2*bufLen, testData[2*bufLen:], 4) + }) + + // cachedReader should not make extra requests, the expectedCounter should always be the same + counter = 1 + t.Run("cachedReader", func(t *testing.T) { + rr.WithCachedReader(context.Background(), func() { + // "1234567890" + assertReadAtFunc(t, bufLen, 0, testData[:bufLen], 2) + // "abcdefghij" + assertReadAtFunc(t, bufLen, bufLen, testData[bufLen:2*bufLen], 2) + // "0987654321" + assertReadAtFunc(t, bufLen, 2*bufLen, testData[2*bufLen:], 2) + }) + }) +} + +func TestReadContextCanceled(t *testing.T) { + testServer := newTestServer(t, nil) + defer testServer.Close() + + resource, err := NewResource(context.Background(), testServer.URL+"/resource") + require.NoError(t, err) + + rr := NewRangedReader(resource) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + t.Run("section_reader", func(t *testing.T) { + s := rr.SectionReader(ctx, 0, resource.Size) + + buf := make([]byte, resource.Size) + n, err := s.Read(buf) + require.Error(t, err) + require.Contains(t, err.Error(), "context canceled") + require.Zero(t, n) + }) + + t.Run("cached_reader", func(t *testing.T) { + rr.WithCachedReader(ctx, func() { + buf := make([]byte, resource.Size) + n, err := rr.ReadAt(buf, int64(0)) + require.Error(t, err) + require.Contains(t, err.Error(), "context canceled") + require.Zero(t, n) + }) + }) +} + +func newTestServer(t *testing.T, do func()) *httptest.Server { + t.Helper() + + // use a constant known time or else http.ServeContent will change Last-Modified value + tNow, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + require.NoError(t, err) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if do != nil { + do() + } + + http.ServeContent(w, r, r.URL.Path, tNow, strings.NewReader(testData)) + })) +} diff --git a/internal/httprange/http_reader.go b/internal/httprange/http_reader.go new file mode 100644 index 000000000..589351fa1 --- /dev/null +++ b/internal/httprange/http_reader.go @@ -0,0 +1,215 @@ +package httprange + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httptransport" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +var ( + // ErrNotFound is returned when servers responds with 404 + ErrNotFound = errors.New("resource not found") + + // ErrRangeRequestsNotSupported is returned by Seek and Read + // when the remote server does not allow range requests for a given request parameters + ErrRangeRequestsNotSupported = errors.New("requests range is not supported by the remote server") + + // ErrInvalidRange is returned by Read when trying to read past the end of the file + ErrInvalidRange = errors.New("invalid range") + + // seek errors no need to export them + errSeekInvalidWhence = errors.New("invalid whence") + errSeekOutsideRange = errors.New("outside of range") +) + +// Reader holds a Resource and specifies ranges to read from at a time. +// Implements the io.Reader, io.Seeker and io.Closer interfaces. +type Reader struct { + // ctx for read requests + ctx context.Context + // Resource to read from + Resource *Resource + // res defines a current response serving data + res *http.Response + // rangeStart defines a starting range + rangeStart int64 + // rangeSize defines a size of range + rangeSize int64 + // offset defines a current place where data is being read from + offset int64 +} + +// ensure that Reader is seekable +var _ vfs.SeekableFile = &Reader{} + +// TODO: make this configurable/take an http client when creating a reader/ranged reader +// instead https://gitlab.com/gitlab-org/gitlab-pages/-/issues/457 +var httpClient = &http.Client{ + // The longest time the request can be executed + Timeout: 30 * time.Minute, + Transport: httptransport.NewTransportWithMetrics( + "httprange_client", + metrics.HTTPRangeTraceDuration, + metrics.HTTPRangeRequestDuration, + metrics.HTTPRangeRequestsTotal, + httptransport.DefaultTTFBTimeout, + ), +} + +// ensureResponse is set before reading from it. +// It will do the request if the reader hasn't got it yet. +func (r *Reader) ensureResponse() error { + if r.res != nil { + return nil + } + + req, err := r.prepareRequest() + if err != nil { + return err + } + + metrics.HTTPRangeOpenRequests.Inc() + + res, err := httpClient.Do(req) + if err != nil { + metrics.HTTPRangeOpenRequests.Dec() + return err + } + + err = r.setResponse(res) + if err != nil { + metrics.HTTPRangeOpenRequests.Dec() + + // cleanup body on failure from r.setResponse to avoid memory leak + res.Body.Close() + } + + return err +} + +func (r *Reader) prepareRequest() (*http.Request, error) { + if r.rangeStart < 0 || r.rangeSize < 0 || r.rangeStart+r.rangeSize > r.Resource.Size { + return nil, ErrInvalidRange + } + + if r.offset < r.rangeStart || r.offset >= r.rangeStart+r.rangeSize { + return nil, ErrInvalidRange + } + + req, err := r.Resource.Request() + if err != nil { + return nil, err + } + + req = req.WithContext(r.ctx) + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", r.offset, r.rangeStart+r.rangeSize-1)) + + return req, nil +} + +func (r *Reader) setResponse(res *http.Response) error { + // TODO: add metrics https://gitlab.com/gitlab-org/gitlab-pages/-/issues/448 + switch res.StatusCode { + case http.StatusOK: + // some servers return 200 OK for bytes=0- + // TODO: should we handle r.Resource.Last-Modified as well? + if r.offset > 0 || r.Resource.ETag != "" && r.Resource.ETag != res.Header.Get("ETag") { + r.Resource.setError(ErrRangeRequestsNotSupported) + return ErrRangeRequestsNotSupported + } + case http.StatusNotFound: + r.Resource.setError(ErrNotFound) + return ErrNotFound + case http.StatusPartialContent: + // Requested `Range` request succeeded https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/206 + break + case http.StatusRequestedRangeNotSatisfiable: + r.Resource.setError(ErrRangeRequestsNotSupported) + return ErrRangeRequestsNotSupported + default: + return fmt.Errorf("httprange: read response %d: %q", res.StatusCode, res.Status) + } + + r.res = res + + return nil +} + +// Seek returns the new offset relative to the start of the file and an error, if any. +// io.SeekStart means relative to the start of the file, +// io.SeekCurrent means relative to the current offset, and +// io.SeekEnd means relative to the end. +func (r *Reader) Seek(offset int64, whence int) (int64, error) { + var newOffset int64 + + switch whence { + case io.SeekStart: + newOffset = r.rangeStart + offset + + case io.SeekCurrent: + newOffset = r.offset + offset + + case io.SeekEnd: + newOffset = r.rangeStart + r.rangeSize + offset + + default: + return 0, errSeekInvalidWhence + } + + if newOffset < r.rangeStart || newOffset > r.rangeStart+r.rangeSize { + return 0, errSeekOutsideRange + } + + if newOffset != r.offset { + // recycle r.res + r.Close() + } + + r.offset = newOffset + return newOffset - r.rangeStart, nil +} + +// Read data into a given buffer. +func (r *Reader) Read(buf []byte) (int, error) { + if len(buf) == 0 { + return 0, nil + } + + if err := r.ensureResponse(); err != nil { + return 0, err + } + + n, err := r.res.Body.Read(buf) + if err == nil || err == io.EOF { + r.offset += int64(n) + } + + return n, err +} + +// Close closes a requests body +func (r *Reader) Close() error { + if r.res != nil { + // no need to read until the end + err := r.res.Body.Close() + r.res = nil + + metrics.HTTPRangeOpenRequests.Dec() + + return err + } + + return nil +} + +// NewReader creates a Reader object on a given resource for a given range +func NewReader(ctx context.Context, resource *Resource, offset, size int64) *Reader { + return &Reader{ctx: ctx, Resource: resource, rangeStart: offset, rangeSize: size, offset: offset} +} diff --git a/internal/httprange/http_reader_test.go b/internal/httprange/http_reader_test.go new file mode 100644 index 000000000..97bfbf24a --- /dev/null +++ b/internal/httprange/http_reader_test.go @@ -0,0 +1,341 @@ +package httprange + +import ( + "context" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSeekAndRead(t *testing.T) { + testServer := newTestServer(t, nil) + defer testServer.Close() + + resource, err := NewResource(context.Background(), testServer.URL+"/data") + require.NoError(t, err) + + tests := map[string]struct { + readerOffset int64 + seekOffset int64 + seekWhence int + readSize int + expectedContent string + expectedSeekErrMsg string + expectedReadErr error + }{ + // io.SeekStart ... + "read_all_from_seek_start": { + readSize: testDataLen, + seekWhence: io.SeekStart, + expectedContent: testData, + expectedReadErr: io.EOF, + }, + "read_10_bytes_from_seek_start": { + readSize: testDataLen / 3, + seekWhence: io.SeekStart, + // "1234567890" + expectedContent: testData[:testDataLen/3], + expectedReadErr: nil, + }, + "read_10_bytes_from_seek_start_with_seek_offset": { + readSize: testDataLen / 3, + seekOffset: int64(testDataLen / 3), + seekWhence: io.SeekStart, + // "abcdefghij" + expectedContent: testData[testDataLen/3 : 2*testDataLen/3], + expectedReadErr: nil, + }, + "read_10_bytes_from_seek_offset_until_eof": { + readSize: testDataLen / 3, + seekOffset: int64(2 * testDataLen / 3), + seekWhence: io.SeekStart, + // "0987654321" + expectedContent: testData[2*testDataLen/3:], + expectedReadErr: io.EOF, + }, + "read_10_bytes_from_reader_offset_with_seek_offset_to_eof": { + readSize: testDataLen / 3, + readerOffset: int64(testDataLen / 3), // reader offset at "a" + seekOffset: int64(testDataLen / 3), // seek offset at "0" + seekWhence: io.SeekStart, + // "0987654321" + expectedContent: testData[2*testDataLen/3:], + expectedReadErr: io.EOF, + }, + "invalid_seek_start_negative_seek_offset": { + seekOffset: -1, + seekWhence: io.SeekStart, + expectedSeekErrMsg: "outside of range", + }, + "invalid_range_seek_at_end": { + readSize: testDataLen, + seekOffset: int64(testDataLen), + seekWhence: io.SeekStart, + expectedReadErr: ErrInvalidRange, + }, + // io.SeekCurrent ... + "read_all_from_seek_current": { + readSize: testDataLen, + seekWhence: io.SeekCurrent, + expectedContent: testData, + expectedReadErr: io.EOF, + }, + "read_10_bytes_from_seek_current": { + readSize: testDataLen / 3, + seekWhence: io.SeekCurrent, + // "1234567890" + expectedContent: testData[:testDataLen/3], + expectedReadErr: nil, + }, + "read_10_bytes_from_seek_current_with_seek_offset": { + readSize: testDataLen / 3, + seekOffset: int64(testDataLen / 3), + seekWhence: io.SeekCurrent, + // "abcdefghij" + expectedContent: testData[testDataLen/3 : 2*testDataLen/3], + expectedReadErr: nil, + }, + "read_10_bytes_from_seek_current_with_seek_offset_until_eof": { + readSize: testDataLen / 3, + seekOffset: int64(2 * testDataLen / 3), + seekWhence: io.SeekCurrent, + // "0987654321" + expectedContent: testData[2*testDataLen/3:], + expectedReadErr: io.EOF, + }, + "read_10_bytes_from_reader_offset_and_seek_current_with_seek_offset_to_eof": { + readSize: testDataLen / 3, + readerOffset: int64(testDataLen / 3), // reader offset at "a" + seekOffset: int64(testDataLen / 3), // seek offset at "0" + seekWhence: io.SeekCurrent, + // "0987654321" + expectedContent: testData[2*testDataLen/3:], + expectedReadErr: io.EOF, + }, + "invalid_seek_current_negative_seek_offset": { + seekOffset: -1, + seekWhence: io.SeekCurrent, + expectedSeekErrMsg: "outside of range", + }, + // io.SeekEnd with negative offsets + "read_all_from_seek_end": { + readSize: testDataLen, + seekWhence: io.SeekEnd, + seekOffset: -int64(testDataLen), + expectedContent: testData, + expectedReadErr: io.EOF, + }, + "read_10_bytes_from_seek_end": { + readSize: testDataLen / 3, + seekWhence: io.SeekEnd, + seekOffset: -int64(testDataLen), + // "1234567890" + expectedContent: testData[:testDataLen/3], + expectedReadErr: nil, + }, + "read_10_bytes_from_seek_end_with_seek_offset": { + readSize: testDataLen / 3, + readerOffset: int64(2 * testDataLen / 3), + seekOffset: -int64(testDataLen / 3), + seekWhence: io.SeekEnd, + // "0987654321" + expectedContent: testData[2*testDataLen/3:], + expectedReadErr: io.EOF, + }, + "read_10_bytes_from_seek_end_with_seek_offset_until_eof": { + readSize: testDataLen / 3, + seekOffset: -int64(testDataLen / 3), + seekWhence: io.SeekEnd, + // "0987654321" + expectedContent: testData[2*testDataLen/3:], + expectedReadErr: io.EOF, + }, + "read_10_bytes_from_reader_offset_and_seek_end_with_seek_offset_to_eof": { + readSize: testDataLen / 3, + readerOffset: int64(testDataLen / 3), // reader offset at "a" + seekOffset: -int64(2 * testDataLen / 3), // seek offset at "a" + seekWhence: io.SeekEnd, + // "abcdefghij" + expectedContent: testData[testDataLen/3 : 2*testDataLen/3], + expectedReadErr: nil, + }, + "invalid_seek_end_positive_seek_offset": { + readSize: testDataLen, + seekOffset: 1, + seekWhence: io.SeekEnd, + expectedSeekErrMsg: "outside of range", + }, + "invalid_range_reading_from_end": { + readSize: testDataLen / 3, + seekWhence: io.SeekEnd, + expectedReadErr: ErrInvalidRange, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + r := NewReader(context.Background(), resource, tt.readerOffset, resource.Size-tt.readerOffset) + + _, err := r.Seek(tt.seekOffset, tt.seekWhence) + if tt.expectedSeekErrMsg != "" { + require.EqualError(t, err, tt.expectedSeekErrMsg) + return + } + require.NoError(t, err) + + buf := make([]byte, tt.readSize) + n, err := r.Read(buf) + if tt.expectedReadErr != nil { + require.Equal(t, tt.expectedReadErr, err) + return + } + + require.Equal(t, n, tt.readSize) + require.Equal(t, tt.expectedContent, string(buf)) + }) + } +} + +func TestReaderSetResponse(t *testing.T) { + tests := map[string]struct { + status int + offset int64 + prevETag string + resEtag string + expectedErrMsg string + expectedIsValid bool + }{ + "partial_content_success": { + status: http.StatusPartialContent, + expectedIsValid: true, + }, + "status_ok_success": { + status: http.StatusOK, + expectedIsValid: true, + }, + "status_ok_previous_response_invalid_offset": { + status: http.StatusOK, + offset: 1, + expectedErrMsg: ErrRangeRequestsNotSupported.Error(), + expectedIsValid: false, + }, + "status_ok_previous_response_different_etag": { + status: http.StatusOK, + prevETag: "old", + resEtag: "new", + expectedErrMsg: ErrRangeRequestsNotSupported.Error(), + expectedIsValid: false, + }, + "requested_range_not_satisfiable": { + status: http.StatusRequestedRangeNotSatisfiable, + expectedErrMsg: ErrRangeRequestsNotSupported.Error(), + expectedIsValid: false, + }, + "not_found": { + status: http.StatusNotFound, + expectedErrMsg: ErrNotFound.Error(), + expectedIsValid: false, + }, + "unhandled_status_code": { + status: http.StatusInternalServerError, + expectedErrMsg: "httprange: read response 500:", + expectedIsValid: true, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + resource := &Resource{ETag: tt.prevETag} + reader := NewReader(context.Background(), resource, tt.offset, 0) + res := &http.Response{StatusCode: tt.status, Header: map[string][]string{}} + res.Header.Set("ETag", tt.resEtag) + + err := reader.setResponse(res) + + require.Equal(t, tt.expectedIsValid, resource.Valid()) + + if tt.expectedErrMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedErrMsg) + return + } + + require.NoError(t, err) + }) + } +} + +func TestReaderSeek(t *testing.T) { + type fields struct { + Resource *Resource + res *http.Response + rangeStart int64 + rangeSize int64 + offset int64 + } + + tests := map[string]struct { + fields fields + offset int64 + whence int + want int64 + newOffset int64 + expectedErrMsg string + }{ + "invalid_whence": { + whence: -1, + expectedErrMsg: "invalid whence", + }, + "outside_of_range_invalid_offset": { + whence: io.SeekStart, + offset: -1, + fields: fields{rangeStart: 1}, + expectedErrMsg: "outside of range", + }, + "outside_of_range_invalid_new_offset": { + whence: io.SeekStart, + offset: 2, // newOffset = 3 + fields: fields{rangeStart: 1, rangeSize: 1}, + expectedErrMsg: "outside of range", + }, + "seek_start": { + whence: io.SeekStart, + offset: 1, + want: 1, + newOffset: 2, + fields: fields{rangeStart: 1, rangeSize: 1}, + }, + "seek_current": { + whence: io.SeekCurrent, + offset: 2, + want: 1, + newOffset: 2, + fields: fields{rangeStart: 1, rangeSize: 1, offset: 0}, + }, + "seek_end": { + whence: io.SeekEnd, + want: 1, + newOffset: 2, + fields: fields{rangeStart: 1, rangeSize: 1, offset: 0}, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + r := &Reader{ + res: tt.fields.res, + rangeStart: tt.fields.rangeStart, + rangeSize: tt.fields.rangeSize, + offset: tt.fields.offset, + } + + got, err := r.Seek(tt.offset, tt.whence) + if tt.expectedErrMsg != "" { + require.EqualError(t, err, tt.expectedErrMsg) + return + } + + require.Equal(t, tt.want, got) + require.Equal(t, tt.newOffset, r.offset) + }) + } +} diff --git a/internal/httprange/resource.go b/internal/httprange/resource.go new file mode 100644 index 000000000..8b908fe85 --- /dev/null +++ b/internal/httprange/resource.go @@ -0,0 +1,129 @@ +package httprange + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "sync/atomic" +) + +// Resource represents any HTTP resource that can be read by a GET operation. +// It holds the resource's URL and metadata about it. +type Resource struct { + ETag string + LastModified string + Size int64 + + url atomic.Value + err atomic.Value +} + +func (r *Resource) URL() string { + url, _ := r.url.Load().(string) + return url +} + +func (r *Resource) SetURL(url string) { + if r.URL() == url { + // We want to avoid cache lines invalidation + // on CPU due to value change + return + } + + r.url.Store(url) +} + +func (r *Resource) Err() error { + err, _ := r.err.Load().(error) + return err +} + +func (r *Resource) Valid() bool { + return r.Err() == nil +} + +func (r *Resource) setError(err error) { + r.err.Store(err) +} + +func (r *Resource) Request() (*http.Request, error) { + req, err := http.NewRequest("GET", r.URL(), nil) + if err != nil { + return nil, err + } + + if r.ETag != "" { + req.Header.Set("ETag", r.ETag) + } else if r.LastModified != "" { + // Last-Modified should be a fallback mechanism in case ETag is not present + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified + req.Header.Set("If-Range", r.LastModified) + } + + return req, nil +} + +func NewResource(ctx context.Context, url string) (*Resource, error) { + // the `h.URL` is likely pre-signed URL that only supports GET requests + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + req = req.WithContext(ctx) + + // we fetch a single byte and ensure that range requests is additionally supported + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", 0, 0)) + + // nolint: bodyclose + // body will be closed by discardAndClose + res, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + defer func() { + io.CopyN(ioutil.Discard, res.Body, 1) // since we want to read a single byte + res.Body.Close() + }() + + resource := &Resource{ + ETag: res.Header.Get("ETag"), + LastModified: res.Header.Get("Last-Modified"), + } + + resource.SetURL(url) + + switch res.StatusCode { + case http.StatusOK: + resource.Size = res.ContentLength + return resource, nil + + case http.StatusPartialContent: + contentRange := res.Header.Get("Content-Range") + ranges := strings.SplitN(contentRange, "/", 2) + if len(ranges) != 2 { + return nil, fmt.Errorf("invalid `Content-Range`: %q", contentRange) + } + + resource.Size, err = strconv.ParseInt(ranges[1], 0, 64) + if err != nil { + return nil, fmt.Errorf("invalid `Content-Range`: %q %w", contentRange, err) + } + + return resource, nil + + case http.StatusRequestedRangeNotSatisfiable: + return nil, ErrRangeRequestsNotSupported + + case http.StatusNotFound: + return nil, ErrNotFound + + default: + return nil, fmt.Errorf("httprange: new resource %d: %q", res.StatusCode, res.Status) + } +} diff --git a/internal/httprange/resource_test.go b/internal/httprange/resource_test.go new file mode 100644 index 000000000..1d6481fca --- /dev/null +++ b/internal/httprange/resource_test.go @@ -0,0 +1,108 @@ +package httprange + +import ( + "context" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/require" +) + +func urlValue(url string) atomic.Value { + v := atomic.Value{} + v.Store(url) + return v +} + +func TestNewResource(t *testing.T) { + resource := &Resource{ + url: urlValue("/some/resource"), + ETag: "etag", + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + Size: 1, + } + + tests := map[string]struct { + url string + status int + contentRange string + want *Resource + expectedErrMsg string + }{ + "status_ok": { + url: "/some/resource", + status: http.StatusOK, + want: resource, + }, + "status_partial_content_success": { + url: "/some/resource", + status: http.StatusPartialContent, + contentRange: "bytes 200-1000/67589", + want: &Resource{ + url: urlValue("/some/resource"), + ETag: "etag", + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + Size: 67589, + }, + }, + "status_partial_content_invalid_content_range": { + url: "/some/resource", + status: http.StatusPartialContent, + contentRange: "invalid", + expectedErrMsg: "invalid `Content-Range`:", + want: resource, + }, + "status_partial_content_content_range_not_a_number": { + url: "/some/resource", + status: http.StatusPartialContent, + contentRange: "bytes 200-1000/notanumber", + expectedErrMsg: "invalid `Content-Range`:", + want: resource, + }, + "StatusRequestedRangeNotSatisfiable": { + url: "/some/resource", + status: http.StatusRequestedRangeNotSatisfiable, + expectedErrMsg: ErrRangeRequestsNotSupported.Error(), + want: resource, + }, + "not_found": { + url: "/some/resource", + status: http.StatusNotFound, + expectedErrMsg: ErrNotFound.Error(), + want: resource, + }, + "invalid_url": { + url: "/%", + expectedErrMsg: "invalid URL escape", + want: resource, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("ETag", tt.want.ETag) + w.Header().Set("Last-Modified", tt.want.LastModified) + w.Header().Set("Content-Range", tt.contentRange) + w.WriteHeader(tt.status) + w.Write([]byte("1")) + })) + defer testServer.Close() + + got, err := NewResource(context.Background(), testServer.URL+tt.url) + if tt.expectedErrMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedErrMsg) + return + } + + require.NoError(t, err) + require.Contains(t, got.URL(), tt.want.URL()) + require.Equal(t, tt.want.LastModified, got.LastModified) + require.Equal(t, tt.want.ETag, got.ETag) + require.Equal(t, tt.want.Size, got.Size) + }) + } +} diff --git a/internal/httptransport/LICENSE b/internal/httptransport/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/internal/httptransport/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/httptransport/trace.go b/internal/httptransport/trace.go new file mode 100644 index 000000000..9ece5fc42 --- /dev/null +++ b/internal/httptransport/trace.go @@ -0,0 +1,77 @@ +package httptransport + +import ( + "crypto/tls" + "net/http/httptrace" + "time" + + "gitlab.com/gitlab-org/labkit/log" +) + +func (mrt *meteredRoundTripper) newTracer(start time.Time) *httptrace. + ClientTrace { + trace := &httptrace.ClientTrace{ + GetConn: func(host string) { + mrt.httpTraceObserve("httptrace.ClientTrace.GetConn", start) + + log.WithFields(log.Fields{ + "host": host, + }).Traceln("httptrace.ClientTrace.GetConn") + }, + GotConn: func(connInfo httptrace.GotConnInfo) { + mrt.httpTraceObserve("httptrace.ClientTrace.GotConn", start) + + log.WithFields(log.Fields{ + "reused": connInfo.Reused, + "was_idle": connInfo.WasIdle, + "idle_time_ms": connInfo.IdleTime.Milliseconds(), + }).Traceln("httptrace.ClientTrace.GotConn") + }, + GotFirstResponseByte: func() { + mrt.httpTraceObserve("httptrace.ClientTrace.GotFirstResponseByte", start) + }, + DNSStart: func(d httptrace.DNSStartInfo) { + mrt.httpTraceObserve("httptrace.ClientTrace.DNSStart", start) + }, + DNSDone: func(d httptrace.DNSDoneInfo) { + mrt.httpTraceObserve("httptrace.ClientTrace.DNSDone", start) + + log.WithFields(log.Fields{}).WithError(d.Err). + Traceln("httptrace.ClientTrace.DNSDone") + }, + ConnectStart: func(net, addr string) { + mrt.httpTraceObserve("httptrace.ClientTrace.ConnectStart", start) + + log.WithFields(log.Fields{ + "network": net, + "address": addr, + }).Traceln("httptrace.ClientTrace.ConnectStart") + }, + ConnectDone: func(net string, addr string, err error) { + mrt.httpTraceObserve("httptrace.ClientTrace.ConnectDone", start) + + log.WithFields(log.Fields{ + "network": net, + "address": addr, + }).WithError(err).Traceln("httptrace.ClientTrace.ConnectDone") + }, + TLSHandshakeStart: func() { + mrt.httpTraceObserve("httptrace.ClientTrace.TLSHandshakeStart", start) + }, + TLSHandshakeDone: func(connState tls.ConnectionState, err error) { + mrt.httpTraceObserve("httptrace.ClientTrace.TLSHandshakeDone", start) + + log.WithFields(log.Fields{ + "version": connState.Version, + "connection_resumed": connState.DidResume, + }).WithError(err).Traceln("httptrace.ClientTrace.TLSHandshakeDone") + }, + } + + return trace +} + +func (mrt *meteredRoundTripper) httpTraceObserve(label string, start time.Time) { + mrt.tracer.WithLabelValues(label). + Observe(time.Since(start).Seconds()) +} diff --git a/internal/httptransport/transport.go b/internal/httptransport/transport.go new file mode 100644 index 000000000..d8e6a3fe3 --- /dev/null +++ b/internal/httptransport/transport.go @@ -0,0 +1,142 @@ +package httptransport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "net" + "net/http" + "net/http/httptrace" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" +) + +const ( + // DefaultTTFBTimeout is the timeout used in the meteredRoundTripper + // when calling http.Transport.RoundTrip. The request will be cancelled + // if the response takes longer than this. + DefaultTTFBTimeout = 15 * time.Second +) + +var ( + sysPoolOnce = &sync.Once{} + sysPool *x509.CertPool + + // only overridden by transport_darwin.go + loadExtraCerts = func() {} + // InternalTransport can be used with http.Client with TLS and certificates + InternalTransport = newInternalTransport() +) + +type meteredRoundTripper struct { + next http.RoundTripper + name string + tracer *prometheus.HistogramVec + durations *prometheus.HistogramVec + counter *prometheus.CounterVec + ttfbTimeout time.Duration +} + +func newInternalTransport() *http.Transport { + return &http.Transport{ + DialTLS: func(network, addr string) (net.Conn, error) { + return tls.Dial(network, addr, &tls.Config{RootCAs: pool()}) + }, + Proxy: http.ProxyFromEnvironment, + // overrides the DefaultMaxIdleConnsPerHost = 2 + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + // Set more timeouts https://gitlab.com/gitlab-org/gitlab-pages/-/issues/495 + TLSHandshakeTimeout: 10 * time.Second, + ResponseHeaderTimeout: 15 * time.Second, + ExpectContinueTimeout: 15 * time.Second, + } +} + +// NewTransportWithMetrics will create a custom http.RoundTripper that can be used with an http.Client. +// The RoundTripper will report metrics based on the collectors passed. +func NewTransportWithMetrics(name string, tracerVec, durationsVec *prometheus. + HistogramVec, counterVec *prometheus.CounterVec, ttfbTimeout time.Duration) http.RoundTripper { + return &meteredRoundTripper{ + next: InternalTransport, + name: name, + tracer: tracerVec, + durations: durationsVec, + counter: counterVec, + ttfbTimeout: ttfbTimeout, + } +} + +// This is here because macOS does not support the SSL_CERT_FILE and +// SSL_CERT_DIR environment variables. We have arranged things to read +// SSL_CERT_FILE and SSL_CERT_DIR as late as possible to avoid conflicts +// with file descriptor passing at startup. +func pool() *x509.CertPool { + sysPoolOnce.Do(loadPool) + return sysPool +} + +func loadPool() { + var err error + + // Always load the system cert pool + sysPool, err = x509.SystemCertPool() + if err != nil { + log.WithError(err).Error("failed to load system cert pool for http client") + return + } + + // Go does not load SSL_CERT_FILE and SSL_CERT_DIR on darwin systems so we need to + // load them manually in OSX. See https://golang.org/src/crypto/x509/root_unix.go + loadExtraCerts() +} + +// withRoundTripper takes an original RoundTripper, reports metrics based on the +// gauge and counter collectors passed +func (mrt *meteredRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + start := time.Now() + + ctx := httptrace.WithClientTrace(r.Context(), mrt.newTracer(start)) + ctx, cancel := context.WithCancel(ctx) + + timer := time.AfterFunc(mrt.ttfbTimeout, cancel) + defer timer.Stop() + + r = r.WithContext(ctx) + + resp, err := mrt.next.RoundTrip(r) + if err != nil { + mrt.counter.WithLabelValues("error").Inc() + return nil, err + } + + mrt.logResponse(r, resp) + + statusCode := strconv.Itoa(resp.StatusCode) + mrt.durations.WithLabelValues(statusCode).Observe(time.Since(start).Seconds()) + mrt.counter.WithLabelValues(statusCode).Inc() + + return resp, nil +} + +func (mrt *meteredRoundTripper) logResponse(req *http.Request, resp *http.Response) { + if log.GetLevel() == log.TraceLevel { + l := log.WithFields(log.Fields{ + "client_name": mrt.name, + "req_url": req.URL.String(), + "res_status_code": resp.StatusCode, + }) + + for header, value := range resp.Header { + l = l.WithField(strings.ToLower(header), strings.Join(value, ";")) + } + + l.Traceln("response") + } +} diff --git a/internal/httptransport/transport_darwin.go b/internal/httptransport/transport_darwin.go new file mode 100644 index 000000000..b73009dab --- /dev/null +++ b/internal/httptransport/transport_darwin.go @@ -0,0 +1,118 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Modified copy of https://golang.org/src/crypto/x509/root_unix.go + +package httptransport + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" +) + +const ( + // certFileEnv is the environment variable which identifies where to locate + // the SSL certificate file. If set this overrides the system default. + certFileEnv = "SSL_CERT_FILE" + + // certDirEnv is the environment variable which identifies which directory + // to check for SSL certificate files. If set this overrides the system default. + // It is a colon separated list of directories. + // See https://www.openssl.org/docs/man1.0.2/man1/c_rehash.html. + certDirEnv = "SSL_CERT_DIR" +) + +func init() { + // override and load SSL_CERT_FILE and SSL_CERT_DIR in OSX. + loadExtraCerts = func() { + if err := loadCertFile(); err != nil { + log.WithError(err).Error("failed to read SSL_CERT_FILE") + } + + if err := loadCertDir(); err != nil { + log.WithError(err).Error("failed to load SSL_CERT_DIR") + } + } +} + +func loadCertFile() error { + sslCertFile := os.Getenv(certFileEnv) + if sslCertFile == "" { + return nil + } + + data, err := ioutil.ReadFile(sslCertFile) + if err != nil && !os.IsNotExist(err) { + return err + } + + sysPool.AppendCertsFromPEM(data) + + return nil +} + +func loadCertDir() error { + var firstErr error + var dirs []string + if d := os.Getenv(certDirEnv); d != "" { + // OpenSSL and BoringSSL both use ":" as the SSL_CERT_DIR separator. + // See: + // * https://golang.org/issue/35325 + // * https://www.openssl.org/docs/man1.0.2/man1/c_rehash.html + dirs = strings.Split(d, ":") + } + + for _, directory := range dirs { + fis, err := readUniqueDirectoryEntries(directory) + if err != nil { + if firstErr == nil && !os.IsNotExist(err) { + firstErr = err + } + continue + } + + rootsAdded := false + for _, fi := range fis { + data, err := ioutil.ReadFile(directory + "/" + fi.Name()) + if err == nil && sysPool.AppendCertsFromPEM(data) { + rootsAdded = true + } + } + + if rootsAdded { + return nil + } + } + + return firstErr +} + +// readUniqueDirectoryEntries is like ioutil.ReadDir but omits +// symlinks that point within the directory. +func readUniqueDirectoryEntries(dir string) ([]os.FileInfo, error) { + fis, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + uniq := fis[:0] + for _, fi := range fis { + if !isSameDirSymlink(fi, dir) { + uniq = append(uniq, fi) + } + } + return uniq, nil +} + +// isSameDirSymlink reports whether fi in dir is a symlink with a +// target not containing a slash. +func isSameDirSymlink(fi os.FileInfo, dir string) bool { + if fi.Mode()&os.ModeSymlink == 0 { + return false + } + target, err := os.Readlink(filepath.Join(dir, fi.Name())) + return err == nil && !strings.Contains(target, "/") +} diff --git a/internal/httptransport/transport_test.go b/internal/httptransport/transport_test.go new file mode 100644 index 000000000..9059ea153 --- /dev/null +++ b/internal/httptransport/transport_test.go @@ -0,0 +1,135 @@ +package httptransport + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "strconv" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/require" +) + +func Test_withRoundTripper(t *testing.T) { + tests := []struct { + name string + statusCode int + err error + }{ + { + name: "successful_response", + statusCode: http.StatusNoContent, + }, + { + name: "error_response", + statusCode: http.StatusForbidden, + }, + { + name: "internal_error_response", + statusCode: http.StatusInternalServerError, + }, + { + name: "unhandled_status_response", + statusCode: http.StatusPermanentRedirect, + }, + { + name: "client_error", + err: fmt.Errorf("something went wrong"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + histVec, counterVec := newTestMetrics(t) + + next := &mockRoundTripper{ + res: &http.Response{ + StatusCode: tt.statusCode, + }, + err: tt.err, + timeout: time.Nanosecond, + } + + mtr := &meteredRoundTripper{next: next, durations: histVec, counter: counterVec, ttfbTimeout: DefaultTTFBTimeout} + r := httptest.NewRequest("GET", "/", nil) + + res, err := mtr.RoundTrip(r) + if tt.err != nil { + counterCount := testutil.ToFloat64(counterVec.WithLabelValues("error")) + require.Equal(t, float64(1), counterCount, "error") + + return + } + require.NoError(t, err) + require.NotNil(t, res) + + statusCode := strconv.Itoa(res.StatusCode) + counterCount := testutil.ToFloat64(counterVec.WithLabelValues(statusCode)) + require.Equal(t, float64(1), counterCount, statusCode) + }) + } +} + +func TestRoundTripTTFBTimeout(t *testing.T) { + histVec, counterVec := newTestMetrics(t) + + next := &mockRoundTripper{ + res: &http.Response{ + StatusCode: http.StatusOK, + }, + timeout: time.Millisecond, + err: nil, + } + + mtr := &meteredRoundTripper{next: next, durations: histVec, counter: counterVec, ttfbTimeout: time.Nanosecond} + req, err := http.NewRequest("GET", "https://gitlab.com", nil) + require.NoError(t, err) + + res, err := mtr.RoundTrip(req) + require.Nil(t, res) + require.True(t, errors.Is(err, context.Canceled), "context must have been canceled after ttfb timeout") +} + +func newTestMetrics(t *testing.T) (*prometheus.HistogramVec, *prometheus.CounterVec) { + t.Helper() + + histVec := prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: t.Name(), + }, []string{"status_code"}) + + counterVec := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: t.Name(), + }, []string{"status_code"}) + + return histVec, counterVec +} + +type mockRoundTripper struct { + res *http.Response + err error + timeout time.Duration +} + +func (mrt *mockRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + select { + case <-r.Context().Done(): + return nil, r.Context().Err() + case <-time.After(mrt.timeout): + return mrt.res, mrt.err + } +} + +func TestInternalTransportShouldHaveCustomConnectionPoolSettings(t *testing.T) { + require.EqualValues(t, 100, InternalTransport.MaxIdleConns) + require.EqualValues(t, 100, InternalTransport.MaxIdleConnsPerHost) + require.EqualValues(t, 0, InternalTransport.MaxConnsPerHost) + require.EqualValues(t, 90*time.Second, InternalTransport.IdleConnTimeout) + require.EqualValues(t, 10*time.Second, InternalTransport.TLSHandshakeTimeout) + require.EqualValues(t, 15*time.Second, InternalTransport.ResponseHeaderTimeout) + require.EqualValues(t, 15*time.Second, InternalTransport.ExpectContinueTimeout) +} diff --git a/internal/httputil/LICENSE b/internal/httputil/LICENSE new file mode 100644 index 000000000..65d761bc9 --- /dev/null +++ b/internal/httputil/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/httputil/README.md b/internal/httputil/README.md new file mode 100644 index 000000000..5027c2f53 --- /dev/null +++ b/internal/httputil/README.md @@ -0,0 +1,5 @@ +This folder is a partial import of the [GoDoc API package](https://github.com/golang/gddo), +``` +github.com/golang/gddo/httputil +``` +where the original license (see `LICENSE`) has been incorporated herein. diff --git a/internal/httputil/header/header.go b/internal/httputil/header/header.go new file mode 100644 index 000000000..0f1572e3f --- /dev/null +++ b/internal/httputil/header/header.go @@ -0,0 +1,298 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" + "time" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// Copy returns a shallow copy of the header. +func Copy(header http.Header) http.Header { + h := make(http.Header) + for k, vs := range header { + h[k] = vs + } + return h +} + +var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC} + +// ParseTime parses the header as time. The zero value is returned if the +// header is not present or there is an error parsing the +// header. +func ParseTime(header http.Header, key string) time.Time { + if s := header.Get(key); s != "" { + for _, layout := range timeLayouts { + if t, err := time.Parse(layout, s); err == nil { + return t.UTC() + } + } + } + return time.Time{} +} + +// ParseList parses a comma separated list of values. Commas are ignored in +// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is +// trimmed. +func ParseList(header http.Header, key string) []string { + var result []string + for _, s := range header[http.CanonicalHeaderKey(key)] { + begin := 0 + end := 0 + escape := false + quote := false + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + end = i + 1 + case quote: + switch b { + case '\\': + escape = true + case '"': + quote = false + } + end = i + 1 + case b == '"': + quote = true + end = i + 1 + case octetTypes[b]&isSpace != 0: + if begin == end { + begin = i + 1 + end = begin + } + case b == ',': + if begin < end { + result = append(result, s[begin:end]) + } + begin = i + 1 + end = begin + default: + end = i + 1 + } + } + if begin < end { + result = append(result, s[begin:end]) + } + } + return result +} + +// ParseValueAndParams parses a comma separated list of values with optional +// semicolon separated name-value pairs. Content-Type and Content-Disposition +// headers are in this format. +func ParseValueAndParams(header http.Header, key string) (value string, params map[string]string) { + params = make(map[string]string) + s := header.Get(key) + value, s = expectTokenSlash(s) + if value == "" { + return + } + value = strings.ToLower(value) + s = skipSpace(s) + for strings.HasPrefix(s, ";") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. +func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/internal/httputil/negotiate.go b/internal/httputil/negotiate.go new file mode 100644 index 000000000..a25e3ed1c --- /dev/null +++ b/internal/httputil/negotiate.go @@ -0,0 +1,80 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + "strings" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} + +// NegotiateContentType returns the best offered content type for the request's +// Accept header. If two offers match with equal weight, then the more specific +// offer is preferred. For example, text/* trumps */*. If two offers match +// with equal weight and specificity, then the offer earlier in the list is +// preferred. If no offers match, then defaultOffer is returned. +func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string { + bestOffer := defaultOffer + bestQ := -1.0 + bestWild := 3 + specs := header.ParseAccept(r.Header, "Accept") + for _, offer := range offers { + for _, spec := range specs { + switch { + case spec.Q == 0.0: + // ignore + case spec.Q < bestQ: + // better match found + case spec.Value == "*/*": + if spec.Q > bestQ || bestWild > 2 { + bestQ = spec.Q + bestWild = 2 + bestOffer = offer + } + case strings.HasSuffix(spec.Value, "/*"): + if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) && + (spec.Q > bestQ || bestWild > 1) { + bestQ = spec.Q + bestWild = 1 + bestOffer = offer + } + default: + if spec.Value == offer && + (spec.Q > bestQ || bestWild > 0) { + bestQ = spec.Q + bestWild = 0 + bestOffer = offer + } + } + } + } + return bestOffer +} diff --git a/internal/interface.go b/internal/interface.go new file mode 100644 index 000000000..3e82ee3af --- /dev/null +++ b/internal/interface.go @@ -0,0 +1,18 @@ +package internal + +import ( + "net/http" +) + +// Artifact allows to handle artifact related requests +type Artifact interface { + TryMakeRequest(host string, w http.ResponseWriter, r *http.Request, token string, responseHandler func(*http.Response) bool) bool +} + +// Auth handles the authentication logic +type Auth interface { + IsAuthSupported() bool + RequireAuth(w http.ResponseWriter, r *http.Request) bool + GetTokenIfExists(w http.ResponseWriter, r *http.Request) (string, error) + CheckResponseForInvalidToken(w http.ResponseWriter, r *http.Request, resp *http.Response) bool +} diff --git a/internal/jail/jail.go b/internal/jail/jail.go new file mode 100644 index 000000000..13b393745 --- /dev/null +++ b/internal/jail/jail.go @@ -0,0 +1,246 @@ +package jail + +import ( + "fmt" + "io" + "os" + "path" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +type pathAndMode struct { + path string + mode os.FileMode + + // Only respected if mode is os.ModeCharDevice + rdev int +} + +// Jail is a Chroot jail builder +type Jail struct { + root string + deleteRoot bool + directories []pathAndMode + files map[string]pathAndMode + bindMounts map[string]string +} + +// Into returns a Jail on path, assuming it already exists on disk. On disposal, +// the jail *will not* remove the path +func Into(path string) *Jail { + return &Jail{ + root: path, + deleteRoot: false, + files: make(map[string]pathAndMode), + bindMounts: make(map[string]string), + } +} + +// Create returns a Jail on path, creating the directory if needed. On disposal, +// the jail will remove the path +func Create(path string, perm os.FileMode) *Jail { + jail := Into(path) + jail.deleteRoot = true + jail.directories = append(jail.directories, pathAndMode{path: path, mode: perm}) + + return jail +} + +// CreateTimestamped returns a Jail on a path composed by prefix and current +// timestamp, creating the directory. On disposal, the jail will remove the path +func CreateTimestamped(prefix string, perm os.FileMode) *Jail { + jailPath := path.Join(os.TempDir(), fmt.Sprintf("%s-%d", prefix, time.Now().UnixNano())) + + return Create(jailPath, perm) +} + +// Path returns the path of the jail +func (j *Jail) Path() string { + return j.root +} + +// Build creates the jail, making directories and copying files. If an error +// setting up is encountered, a best-effort attempt will be made to remove any +// partial state before returning the error +func (j *Jail) Build() error { + // Simplify error-handling in this method. It's unsafe to run os.RemoveAll() + // across a bind mount. Only one is needed at present, and this restriction + // means there's no need to handle the case where one of several mounts + // failed in j.mount() + // + // Make j.mount() robust before removing this restriction, at the risk of + // extreme data loss + if len(j.bindMounts) > 1 { + return fmt.Errorf("BUG: jail does not currently support multiple bind mounts") + } + + for _, dir := range j.directories { + if err := os.Mkdir(dir.path, dir.mode); err != nil { + j.removeAll() + return fmt.Errorf("can't create directory %q. %s", dir.path, err) + } + } + + for dest, src := range j.files { + if err := handleFile(dest, src); err != nil { + j.removeAll() + return fmt.Errorf("can't copy %q -> %q. %s", src.path, dest, err) + } + } + + if err := j.mount(); err != nil { + // Only one bind mount is supported. If it failed to mount, there is + // nothing to unmount, so it is safe to run removeAll() here. + j.removeAll() + return err + } + + return nil +} + +func (j *Jail) removeAll() error { + // Deleting the root will remove all child directories, so there's no need + // to traverse files and directories + if j.deleteRoot { + if err := os.RemoveAll(j.Path()); err != nil { + return fmt.Errorf("can't delete jail %q. %s", j.Path(), err) + } + } else { + for path := range j.files { + if err := os.Remove(path); err != nil { + return fmt.Errorf("can't delete file in jail %q: %s", path, err) + } + } + + // Iterate directories in reverse to remove children before parents + for i := len(j.directories) - 1; i >= 0; i-- { + dest := j.directories[i] + if err := os.Remove(dest.path); err != nil { + return fmt.Errorf("can't delete directory in jail %q: %s", dest.path, err) + } + } + } + + return nil +} + +// Dispose erases everything inside the jail +func (j *Jail) Dispose() error { + if err := j.unmount(); err != nil { + return err + } + + if err := j.removeAll(); err != nil { + return fmt.Errorf("can't delete jail %q. %s", j.Path(), err) + } + + return nil +} + +// MkDir enqueue a mkdir operation at jail building time +func (j *Jail) MkDir(path string, perm os.FileMode) { + j.directories = append(j.directories, pathAndMode{path: j.ExternalPath(path), mode: perm}) +} + +// CharDev enqueues an mknod operation for the given character device at jail +// building time +func (j *Jail) CharDev(path string) error { + fi, err := os.Stat(path) + if err != nil { + return fmt.Errorf("can't stat %q: %s", path, err) + } + + if (fi.Mode() & os.ModeCharDevice) == 0 { + return fmt.Errorf("can't mknod %q: not a character device", path) + } + + // Read the device number from the underlying unix implementation of stat() + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("couldn't determine rdev for %q", path) + } + + jailedDest := j.ExternalPath(path) + j.files[jailedDest] = pathAndMode{ + path: path, + mode: fi.Mode(), + rdev: int(sys.Rdev), + } + + return nil +} + +// CopyTo enqueues a file copy operation at jail building time +func (j *Jail) CopyTo(dest, src string) error { + fi, err := os.Stat(src) + if err != nil { + return fmt.Errorf("can't stat %q. %s", src, err) + } + + if fi.IsDir() { + return fmt.Errorf("can't copy directories. %s", src) + } + + jailedDest := j.ExternalPath(dest) + j.files[jailedDest] = pathAndMode{ + path: src, + mode: fi.Mode(), + } + + return nil +} + +// Copy enqueues a file copy operation at jail building time +func (j *Jail) Copy(path string) error { + return j.CopyTo(path, path) +} + +// Bind enqueues a bind mount operation at jail building time +func (j *Jail) Bind(dest, src string) { + jailedDest := j.ExternalPath(dest) + j.bindMounts[jailedDest] = src +} + +// ExternalPath converts a jail internal path to the equivalent jail external path +func (j *Jail) ExternalPath(internal string) string { + return path.Join(j.Path(), internal) +} + +func handleFile(dest string, src pathAndMode) error { + // Using `io.Copy` on a character device simply doesn't work + if (src.mode & os.ModeCharDevice) > 0 { + return createCharacterDevice(dest, src) + } + + // FIXME: currently, symlinks, block devices, named pipes and other + // non-regular files will be `Open`ed and have that content streamed to a + // regular file inside the chroot. This is actually desired behaviour for, + // e.g., `/etc/resolv.conf`, but was very surprising + return copyFile(dest, src.path, src.mode) +} + +func createCharacterDevice(dest string, src pathAndMode) error { + unixMode := uint32(src.mode.Perm() | syscall.S_IFCHR) + + return unix.Mknod(dest, unixMode, src.rdev) +} + +func copyFile(dest, src string, perm os.FileMode) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, srcFile) + return err +} diff --git a/internal/jail/jail_test.go b/internal/jail/jail_test.go new file mode 100644 index 000000000..75150da38 --- /dev/null +++ b/internal/jail/jail_test.go @@ -0,0 +1,298 @@ +package jail_test + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/jail" +) + +func tmpJailPath() string { + return path.Join(os.TempDir(), fmt.Sprintf("my-jail-%d", time.Now().Unix())) +} + +func TestTimestampedJails(t *testing.T) { + require := require.New(t) + + prefix := "jail" + var mode os.FileMode = 0755 + + j1 := jail.CreateTimestamped(prefix, mode) + j2 := jail.CreateTimestamped(prefix, mode) + + require.NotEqual(j1.Path(), j2.Path()) +} + +func TestJailPath(t *testing.T) { + require := require.New(t) + + jailPath := tmpJailPath() + cage := jail.Create(jailPath, 0755) + + require.Equal(jailPath, cage.Path()) +} + +func TestJailBuild(t *testing.T) { + require := require.New(t) + + jailPath := tmpJailPath() + cage := jail.Create(jailPath, 0755) + + _, err := os.Stat(cage.Path()) + require.Error(err, "Jail path should not exist before Jail.Build()") + + err = cage.Build() + require.NoError(err) + defer cage.Dispose() + + _, err = os.Stat(cage.Path()) + require.NoError(err, "Jail path should exist after Jail.Build()") +} + +func TestJailOnlySupportsOneBindMount(t *testing.T) { + jailPath := tmpJailPath() + cage := jail.Create(jailPath, 0755) + + cage.Bind("/bin", "/bin") + cage.Bind("/lib", "/lib") + cage.Bind("/usr", "/usr") + + err := cage.Build() + require.Error(t, err, "Build() is expected to fail in this test") + + _, statErr := os.Stat(cage.Path()) + require.True(t, os.IsNotExist(statErr), "Jail path should not exist") +} + +func TestJailBuildCleansUpWhenMountFails(t *testing.T) { + jailPath := tmpJailPath() + cage := jail.Create(jailPath, 0755) + cage.Bind("/foo", "/this/path/does/not/exist/so/mount/will/fail") + + err := cage.Build() + require.Error(t, err, "Build() is expected to fail in this test") + + _, statErr := os.Stat(cage.Path()) + require.True(t, os.IsNotExist(statErr), "Jail path should have been cleaned up") +} + +func TestJailDispose(t *testing.T) { + require := require.New(t) + + jailPath := tmpJailPath() + cage := jail.Create(jailPath, 0755) + + err := cage.Build() + require.NoError(err) + + err = cage.Dispose() + require.NoError(err) + + _, err = os.Stat(cage.Path()) + require.Error(err, "Jail path should not exist after Jail.Dispose()") +} + +func TestJailDisposeDoNotFailOnMissingPath(t *testing.T) { + require := require.New(t) + + jailPath := tmpJailPath() + cage := jail.Create(jailPath, 0755) + + _, err := os.Stat(cage.Path()) + require.Error(err, "Jail path should not exist") + + err = cage.Dispose() + require.NoError(err) +} + +func TestJailWithCharacterDevice(t *testing.T) { + if os.Geteuid() != 0 { + t.Log("This test only works if run as root") + t.SkipNow() + } + + // Determine the expected rdev + fi, err := os.Stat("/dev/urandom") + require.NoError(t, err) + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + t.Log("Couldn't determine expected rdev for /dev/urandom, skipping") + t.SkipNow() + } + + expectedRdev := sys.Rdev + + jailPath := tmpJailPath() + cage := jail.Create(jailPath, 0755) + cage.MkDir("/dev", 0755) + + require.NoError(t, cage.CharDev("/dev/urandom")) + require.NoError(t, cage.Build()) + defer cage.Dispose() + + fi, err = os.Lstat(path.Join(cage.Path(), "/dev/urandom")) + require.NoError(t, err) + + isCharDev := fi.Mode()&os.ModeCharDevice == os.ModeCharDevice + require.True(t, isCharDev, "Created file was not a character device") + + sys, ok = fi.Sys().(*syscall.Stat_t) + require.True(t, ok, "Couldn't determine rdev of created character device") + require.Equal(t, expectedRdev, sys.Rdev, "Incorrect rdev for /dev/urandom") +} + +func TestJailWithFiles(t *testing.T) { + tests := []struct { + name string + directories []string + files []string + error bool + }{ + { + name: "Happy path", + directories: []string{"/tmp", "/tmp/foo", "/bar"}, + }, + { + name: "Missing direcories in path", + directories: []string{"/tmp/foo/bar"}, + error: true, + }, + { + name: "copy /etc/resolv.conf", + directories: []string{"/etc"}, + files: []string{"/etc/resolv.conf"}, + }, + { + name: "copy /etc/resolv.conf without creating /etc", + files: []string{"/etc/resolv.conf"}, + error: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + cage := jail.CreateTimestamped("jail-mkdir", 0755) + for _, dir := range test.directories { + cage.MkDir(dir, 0755) + } + for _, file := range test.files { + if err := cage.Copy(file); err != nil { + t.Errorf("Can't prepare copy of %s inside the jail. %s", file, err) + } + } + + err := cage.Build() + defer cage.Dispose() + + if test.error { + require.Error(err) + } else { + require.NoError(err) + + for _, dir := range test.directories { + _, err := os.Stat(path.Join(cage.Path(), dir)) + require.NoError(err, "jailed dir should exist") + } + + for _, file := range test.files { + _, err := os.Stat(path.Join(cage.Path(), file)) + require.NoError(err, "Jailed file should exist") + } + } + }) + } +} + +func TestJailCopyTo(t *testing.T) { + require := require.New(t) + + content := "hello" + + cage := jail.CreateTimestamped("check-file-copy", 0755) + + tmpFile, err := ioutil.TempFile("", "dummy-file") + if err != nil { + t.Error("Can't create temporary file") + } + defer os.Remove(tmpFile.Name()) + tmpFile.WriteString(content) + + filePath := tmpFile.Name() + jailedFilePath := cage.ExternalPath(path.Base(filePath)) + + err = cage.CopyTo(path.Base(filePath), filePath) + require.NoError(err) + + err = cage.Build() + defer cage.Dispose() + require.NoError(err) + + jailedFI, err := os.Stat(jailedFilePath) + require.NoError(err) + + fi, err := os.Stat(filePath) + require.NoError(err) + + require.Equal(fi.Mode(), jailedFI.Mode(), "jailed file should preserve file mode") + require.Equal(fi.Size(), jailedFI.Size(), "jailed file should have same size of original file") + + jailedContent, err := ioutil.ReadFile(jailedFilePath) + require.NoError(err) + require.Equal(content, string(jailedContent), "jailed file should preserve file content") +} + +func TestJailIntoOnlyCleansSubpaths(t *testing.T) { + jailPath := tmpJailPath() + require.NoError(t, os.MkdirAll(jailPath, 0755)) + defer os.RemoveAll(jailPath) + + chroot := jail.Into(jailPath) + chroot.MkDir("/etc", 0755) + chroot.Copy("/etc/resolv.conf") + require.NoError(t, chroot.Build()) + require.NoError(t, chroot.Dispose()) + + _, err := os.Stat(path.Join(jailPath, "/etc/resolv.conf")) + require.True(t, os.IsNotExist(err), "/etc/resolv.conf in jail was not removed") + _, err = os.Stat(path.Join(jailPath, "/etc")) + require.True(t, os.IsNotExist(err), "/etc in jail was not removed") + _, err = os.Stat(jailPath) + require.NoError(t, err, "/ in jail (corresponding to external directory) was removed") +} + +func TestJailIntoCleansNestedDirs(t *testing.T) { + jailPath := tmpJailPath() + require.NoError(t, os.MkdirAll(jailPath, 0755)) + defer os.RemoveAll(jailPath) + + chroot := jail.Into(jailPath) + + // These need to be cleaned up in reverse order + chroot.MkDir("/way", 0755) + chroot.MkDir("/way/down", 0755) + chroot.MkDir("/way/down/here", 0755) + + require.NoError(t, chroot.Build()) + require.NoError(t, chroot.Dispose()) + + verify := func(inPath string) { + _, err := os.Stat(path.Join(jailPath, inPath)) + require.True(t, os.IsNotExist(err), "{} in jail was not removed", inPath) + } + + verify("/way") + verify("/way/down") + verify("/way/down/here") + + _, err := os.Stat(jailPath) + require.NoError(t, err, "/ in jail (corresponding to external directory) was removed") +} diff --git a/internal/jail/mount_linux.go b/internal/jail/mount_linux.go new file mode 100644 index 000000000..54093c401 --- /dev/null +++ b/internal/jail/mount_linux.go @@ -0,0 +1,55 @@ +package jail + +import ( + "fmt" + "syscall" + + log "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Unshare makes the process to own it's own mount namespace +// and prevent the changes for mounts to be propagated +// to other mount namespace. Making all mount changes local +// to current process +func (j *Jail) Unshare() error { + // https://man7.org/linux/man-pages/man7/mount_namespaces.7.html + err := syscall.Unshare(syscall.CLONE_NEWNS) + log.WithError(err).Info("unsharing mount namespace") + if err != nil { + return err + } + + // As documented in `mount_namespaces`: + // An application that creates a new mount namespace directly using + // clone(2) or unshare(2) may desire to prevent propagation of mount + // events to other mount namespaces + err = syscall.Mount("none", "/", "", unix.MS_REC|unix.MS_PRIVATE, "") + log.WithError(err).Info("changing root filesystem propagation") + return err +} + +func (j *Jail) mount() error { + for dest, src := range j.bindMounts { + var opts uintptr = unix.MS_BIND | unix.MS_REC + if err := unix.Mount(src, dest, "none", opts, ""); err != nil { + return fmt.Errorf("failed to bind mount %s on %s. %s", src, dest, err) + } + } + + return nil +} + +func (j *Jail) unmount() error { + for dest := range j.bindMounts { + if err := unix.Unmount(dest, unix.MNT_DETACH); err != nil { + // A second invocation on unmount with MNT_DETACH flag will return EINVAL + // there's no need to abort with an error if bind mountpoint is already unmounted + if err != unix.EINVAL { + return fmt.Errorf("failed to unmount %s. %s", dest, err) + } + } + } + + return nil +} diff --git a/internal/jail/mount_not_supported.go b/internal/jail/mount_not_supported.go new file mode 100644 index 000000000..b4d3e3488 --- /dev/null +++ b/internal/jail/mount_not_supported.go @@ -0,0 +1,27 @@ +// +build !linux + +package jail + +import ( + "fmt" + "runtime" +) + +func (j *Jail) Unshare() error { + return fmt.Errorf("unshare not supported on %s", runtime.GOOS) +} + +func (j *Jail) notSupported() error { + if len(j.bindMounts) > 0 { + return fmt.Errorf("bind mount not supported on %s", runtime.GOOS) + } + + return nil +} +func (j *Jail) mount() error { + return j.notSupported() +} + +func (j *Jail) unmount() error { + return j.notSupported() +} diff --git a/internal/logging/logging.go b/internal/logging/logging.go new file mode 100644 index 000000000..4dcf66d29 --- /dev/null +++ b/internal/logging/logging.go @@ -0,0 +1,109 @@ +package logging + +import ( + "net/http" + + "github.com/sirupsen/logrus" + "gitlab.com/gitlab-org/labkit/log" + + "gitlab.com/gitlab-org/gitlab-pages/internal/request" +) + +// ConfigureLogging will initialize the system logger. +func ConfigureLogging(format string, verbose bool) error { + var levelOption log.LoggerOption + + if format == "" { + format = "text" + } + + if verbose { + levelOption = log.WithLogLevel("trace") + } else { + levelOption = log.WithLogLevel("info") + } + + _, err := log.Initialize( + log.WithFormatter(format), + levelOption, + ) + return err +} + +// getAccessLogger will return the default logger, except when +// the log format is text, in which case a combined HTTP access +// logger will be configured. This behaviour matches Workhorse +func getAccessLogger(format string) (*logrus.Logger, error) { + if format != "text" && format != "" { + return logrus.StandardLogger(), nil + } + + accessLogger := log.New() + _, err := log.Initialize( + log.WithLogger(accessLogger), // Configure `accessLogger` + log.WithFormatter("combined"), // Use the combined formatter + ) + if err != nil { + return nil, err + } + + return accessLogger, nil +} + +// getExtraLogFields is used to inject additional fields into the +// HTTP access logger middleware. +func getExtraLogFields(r *http.Request) log.Fields { + logFields := log.Fields{ + "pages_https": request.IsHTTPS(r), + "pages_host": request.GetHost(r), + } + + if d := request.GetDomain(r); d != nil { + lp, err := d.GetLookupPath(r) + if err != nil { + return logFields + } + + logFields["pages_project_serving_type"] = lp.ServingType + logFields["pages_project_prefix"] = lp.Prefix + logFields["pages_project_id"] = lp.ProjectID + } + + return logFields +} + +// BasicAccessLogger configures the GitLab pages basic HTTP access logger middleware +func BasicAccessLogger(handler http.Handler, format string, extraFields log.ExtraFieldsGeneratorFunc) (http.Handler, error) { + accessLogger, err := getAccessLogger(format) + if err != nil { + return nil, err + } + + if extraFields == nil { + extraFields = func(r *http.Request) log.Fields { + return log.Fields{ + "pages_https": request.IsHTTPS(r), + "pages_host": r.Host, + } + } + } + + return log.AccessLogger(handler, + log.WithExtraFields(extraFields), + log.WithAccessLogger(accessLogger), + log.WithXFFAllowed(func(sip string) bool { return false }), + ), nil +} + +// AccessLogger configures the GitLab pages HTTP access logger middleware with extra log fields +func AccessLogger(handler http.Handler, format string) (http.Handler, error) { + return BasicAccessLogger(handler, format, getExtraLogFields) +} + +// LogRequest will inject request host and path to the logged messages +func LogRequest(r *http.Request) *logrus.Entry { + return log.WithFields(log.Fields{ + "host": r.Host, + "path": r.URL.Path, + }) +} diff --git a/internal/logging/logging_test.go b/internal/logging/logging_test.go new file mode 100644 index 000000000..9079cc9dc --- /dev/null +++ b/internal/logging/logging_test.go @@ -0,0 +1,102 @@ +package logging + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/request" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" +) + +type lookupPathFunc func(*http.Request) *serving.LookupPath + +func (f lookupPathFunc) Resolve(r *http.Request) (*serving.Request, error) { + return &serving.Request{LookupPath: f(r)}, nil +} + +func TestGetExtraLogFields(t *testing.T) { + domainWithResolver := &domain.Domain{ + Resolver: lookupPathFunc(func(*http.Request) *serving.LookupPath { + return &serving.LookupPath{ + ServingType: "file", + ProjectID: 100, + Prefix: "/prefix", + } + }), + } + + tests := []struct { + name string + scheme string + host string + domain *domain.Domain + expectedHTTPS interface{} + expectedHost interface{} + expectedProjectID interface{} + expectedProjectPrefix interface{} + expectedServingType interface{} + }{ + { + name: "https", + scheme: request.SchemeHTTPS, + host: "githost.io", + domain: domainWithResolver, + expectedHTTPS: true, + expectedHost: "githost.io", + expectedProjectID: uint64(100), + expectedProjectPrefix: "/prefix", + expectedServingType: "file", + }, + { + name: "http", + scheme: request.SchemeHTTP, + host: "githost.io", + domain: domainWithResolver, + expectedHTTPS: false, + expectedHost: "githost.io", + expectedProjectID: uint64(100), + expectedProjectPrefix: "/prefix", + expectedServingType: "file", + }, + { + name: "domain_without_resolved", + scheme: request.SchemeHTTP, + host: "githost.io", + domain: nil, + expectedHTTPS: false, + expectedHost: "githost.io", + expectedProjectID: nil, + expectedServingType: nil, + }, + { + name: "no_domain", + scheme: request.SchemeHTTP, + host: "githost.io", + domain: nil, + expectedHTTPS: false, + expectedHost: "githost.io", + expectedProjectID: nil, + expectedServingType: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req, err := http.NewRequest("GET", "/", nil) + require.NoError(t, err) + + req.URL.Scheme = tt.scheme + req = request.WithHostAndDomain(req, tt.host, tt.domain) + + got := getExtraLogFields(req) + require.Equal(t, tt.expectedHTTPS, got["pages_https"]) + require.Equal(t, tt.expectedHost, got["pages_host"]) + require.Equal(t, tt.expectedProjectID, got["pages_project_id"]) + require.Equal(t, tt.expectedProjectPrefix, got["pages_project_prefix"]) + require.Equal(t, tt.expectedServingType, got["pages_project_serving_type"]) + }) + } +} diff --git a/internal/middleware/headers.go b/internal/middleware/headers.go new file mode 100644 index 000000000..77b008f39 --- /dev/null +++ b/internal/middleware/headers.go @@ -0,0 +1,31 @@ +package middleware + +import ( + "errors" + "net/http" + "strings" +) + +var errInvalidHeaderParameter = errors.New("invalid syntax specified as header parameter") + +// AddCustomHeaders adds a map of Headers to a Response +func AddCustomHeaders(w http.ResponseWriter, headers http.Header) { + for k, v := range headers { + for _, value := range v { + w.Header().Add(k, value) + } + } +} + +// ParseHeaderString parses a string of key values into a map +func ParseHeaderString(customHeaders []string) (http.Header, error) { + headers := http.Header{} + for _, keyValueString := range customHeaders { + keyValue := strings.SplitN(keyValueString, ":", 2) + if len(keyValue) != 2 { + return nil, errInvalidHeaderParameter + } + headers[strings.TrimSpace(keyValue[0])] = append(headers[strings.TrimSpace(keyValue[0])], strings.TrimSpace(keyValue[1])) + } + return headers, nil +} diff --git a/internal/middleware/headers_test.go b/internal/middleware/headers_test.go new file mode 100644 index 000000000..17d31b50d --- /dev/null +++ b/internal/middleware/headers_test.go @@ -0,0 +1,126 @@ +package middleware + +import ( + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseHeaderString(t *testing.T) { + tests := []struct { + name string + headerStrings []string + valid bool + }{{ + name: "Normal case", + headerStrings: []string{"X-Test-String: Test"}, + valid: true, + }, + { + name: "Whitespace trim case", + headerStrings: []string{" X-Test-String : Test "}, + valid: true, + }, + { + name: "Whitespace in key, value case", + headerStrings: []string{"My amazing header: This is a test"}, + valid: true, + }, + { + name: "Non-tracking header case", + headerStrings: []string{"Tk: N"}, + valid: true, + }, + { + name: "Content security header case", + headerStrings: []string{"content-security-policy: default-src 'self'"}, + valid: true, + }, + { + name: "Multiple header strings", + headerStrings: []string{"content-security-policy: default-src 'self'", "X-Test-String: Test", "My amazing header : Amazing"}, + valid: true, + }, + { + name: "Multiple invalid cases", + headerStrings: []string{"content-security-policy: default-src 'self'", "test-case"}, + valid: false, + }, + { + name: "Not valid case", + headerStrings: []string{"Tk= N"}, + valid: false, + }, + { + name: "Not valid case", + headerStrings: []string{"X-Test-String Some-Test"}, + valid: false, + }, + { + name: "Valid and not valid case", + headerStrings: []string{"content-security-policy: default-src 'self'", "test-case"}, + valid: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := ParseHeaderString(tt.headerStrings) + if tt.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestAddCustomHeaders(t *testing.T) { + tests := []struct { + name string + headerStrings []string + wantHeaders map[string]string + }{{ + name: "Normal case", + headerStrings: []string{"X-Test-String: Test"}, + wantHeaders: map[string]string{"X-Test-String": "Test"}, + }, + { + name: "Whitespace trim case", + headerStrings: []string{" X-Test-String : Test "}, + wantHeaders: map[string]string{"X-Test-String": "Test"}, + }, + { + name: "Whitespace in key, value case", + headerStrings: []string{"My amazing header: This is a test"}, + wantHeaders: map[string]string{"My amazing header": "This is a test"}, + }, + { + name: "Non-tracking header case", + headerStrings: []string{"Tk: N"}, + wantHeaders: map[string]string{"Tk": "N"}, + }, + { + name: "Content security header case", + headerStrings: []string{"content-security-policy: default-src 'self'"}, + wantHeaders: map[string]string{"content-security-policy": "default-src 'self'"}, + }, + { + name: "Multiple header strings", + headerStrings: []string{"content-security-policy: default-src 'self'", "X-Test-String: Test", "My amazing header : Amazing"}, + wantHeaders: map[string]string{"content-security-policy": "default-src 'self'", "X-Test-String": "Test", "My amazing header": "Amazing"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + headers, _ := ParseHeaderString(tt.headerStrings) + w := httptest.NewRecorder() + AddCustomHeaders(w, headers) + for k, v := range tt.wantHeaders { + require.Equal(t, v, w.HeaderMap.Get(k), "Expected header %+v, got %+v", v, w.HeaderMap.Get(k)) + } + }) + } +} diff --git a/internal/mocks/mocks.go b/internal/mocks/mocks.go new file mode 100644 index 000000000..e1f0f6d7e --- /dev/null +++ b/internal/mocks/mocks.go @@ -0,0 +1,129 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: internal/interface.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + http "net/http" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockArtifact is a mock of Artifact interface +type MockArtifact struct { + ctrl *gomock.Controller + recorder *MockArtifactMockRecorder +} + +// MockArtifactMockRecorder is the mock recorder for MockArtifact +type MockArtifactMockRecorder struct { + mock *MockArtifact +} + +// NewMockArtifact creates a new mock instance +func NewMockArtifact(ctrl *gomock.Controller) *MockArtifact { + mock := &MockArtifact{ctrl: ctrl} + mock.recorder = &MockArtifactMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockArtifact) EXPECT() *MockArtifactMockRecorder { + return m.recorder +} + +// TryMakeRequest mocks base method +func (m *MockArtifact) TryMakeRequest(host string, w http.ResponseWriter, r *http.Request, token string, responseHandler func(*http.Response) bool) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TryMakeRequest", host, w, r, token, responseHandler) + ret0, _ := ret[0].(bool) + return ret0 +} + +// TryMakeRequest indicates an expected call of TryMakeRequest +func (mr *MockArtifactMockRecorder) TryMakeRequest(host, w, r, token, responseHandler interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TryMakeRequest", reflect.TypeOf((*MockArtifact)(nil).TryMakeRequest), host, w, r, token, responseHandler) +} + +// MockAuth is a mock of Auth interface +type MockAuth struct { + ctrl *gomock.Controller + recorder *MockAuthMockRecorder +} + +// MockAuthMockRecorder is the mock recorder for MockAuth +type MockAuthMockRecorder struct { + mock *MockAuth +} + +// NewMockAuth creates a new mock instance +func NewMockAuth(ctrl *gomock.Controller) *MockAuth { + mock := &MockAuth{ctrl: ctrl} + mock.recorder = &MockAuthMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAuth) EXPECT() *MockAuthMockRecorder { + return m.recorder +} + +// IsAuthSupported mocks base method +func (m *MockAuth) IsAuthSupported() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsAuthSupported") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsAuthSupported indicates an expected call of IsAuthSupported +func (mr *MockAuthMockRecorder) IsAuthSupported() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsAuthSupported", reflect.TypeOf((*MockAuth)(nil).IsAuthSupported)) +} + +// RequireAuth mocks base method +func (m *MockAuth) RequireAuth(w http.ResponseWriter, r *http.Request) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RequireAuth", w, r) + ret0, _ := ret[0].(bool) + return ret0 +} + +// RequireAuth indicates an expected call of RequireAuth +func (mr *MockAuthMockRecorder) RequireAuth(w, r interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequireAuth", reflect.TypeOf((*MockAuth)(nil).RequireAuth), w, r) +} + +// GetTokenIfExists mocks base method +func (m *MockAuth) GetTokenIfExists(w http.ResponseWriter, r *http.Request) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTokenIfExists", w, r) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTokenIfExists indicates an expected call of GetTokenIfExists +func (mr *MockAuthMockRecorder) GetTokenIfExists(w, r interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTokenIfExists", reflect.TypeOf((*MockAuth)(nil).GetTokenIfExists), w, r) +} + +// CheckResponseForInvalidToken mocks base method +func (m *MockAuth) CheckResponseForInvalidToken(w http.ResponseWriter, r *http.Request, resp *http.Response) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckResponseForInvalidToken", w, r, resp) + ret0, _ := ret[0].(bool) + return ret0 +} + +// CheckResponseForInvalidToken indicates an expected call of CheckResponseForInvalidToken +func (mr *MockAuthMockRecorder) CheckResponseForInvalidToken(w, r, resp interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckResponseForInvalidToken", reflect.TypeOf((*MockAuth)(nil).CheckResponseForInvalidToken), w, r, resp) +} diff --git a/internal/netutil/shared_limit_listener.go b/internal/netutil/shared_limit_listener.go new file mode 100644 index 000000000..3f88e5918 --- /dev/null +++ b/internal/netutil/shared_limit_listener.go @@ -0,0 +1,113 @@ +package netutil + +import ( + "errors" + "net" + "sync" + "time" +) + +var ( + errKeepaliveNotSupported = errors.New("Keepalive not supported") +) + +// SharedLimitListener returns a Listener that accepts simultaneous +// connections from the provided Listener only if a shared availability pool +// permits it. Based on https://godoc.org/golang.org/x/net/netutil +func SharedLimitListener(listener net.Listener, limiter *Limiter) net.Listener { + return &sharedLimitListener{ + Listener: listener, + limiter: limiter, + done: make(chan struct{}), + } +} + +// Limiter is used to provide a shared pool of connection slots. Use NewLimiter +// to create an instance +type Limiter struct { + sem chan struct{} +} + +// NewLimiter creates a Limiter with the given capacity +func NewLimiter(n int) *Limiter { + return &Limiter{ + sem: make(chan struct{}, n), + } +} + +type sharedLimitListener struct { + net.Listener + closeOnce sync.Once // ensures the done chan is only closed once + limiter *Limiter // A pool of connection slots shared with other listeners + done chan struct{} // no values sent; closed when Close is called +} + +// acquire acquires the limiting semaphore. Returns true if successfully +// accquired, false if the listener is closed and the semaphore is not +// acquired. +func (l *sharedLimitListener) acquire() bool { + select { + case <-l.done: + return false + case l.limiter.sem <- struct{}{}: + return true + } +} +func (l *sharedLimitListener) release() { <-l.limiter.sem } + +func (l *sharedLimitListener) Accept() (net.Conn, error) { + acquired := l.acquire() + // If the semaphore isn't acquired because the listener was closed, expect + // that this call to accept won't block, but immediately return an error. + c, err := l.Listener.Accept() + if err != nil { + if acquired { + l.release() + } + return nil, err + } + + // Support TCP Keepalive operations if possible + tcpConn, _ := c.(*net.TCPConn) + + return &sharedLimitListenerConn{ + Conn: c, + tcpConn: tcpConn, + release: l.release, + }, nil +} + +func (l *sharedLimitListener) Close() error { + err := l.Listener.Close() + l.closeOnce.Do(func() { close(l.done) }) + return err +} + +type sharedLimitListenerConn struct { + net.Conn + tcpConn *net.TCPConn + releaseOnce sync.Once + release func() +} + +func (c *sharedLimitListenerConn) Close() error { + err := c.Conn.Close() + c.releaseOnce.Do(c.release) + return err +} + +func (c *sharedLimitListenerConn) SetKeepAlive(enabled bool) error { + if c.tcpConn == nil { + return errKeepaliveNotSupported + } + + return c.tcpConn.SetKeepAlive(enabled) +} + +func (c *sharedLimitListenerConn) SetKeepAlivePeriod(period time.Duration) error { + if c.tcpConn == nil { + return errKeepaliveNotSupported + } + + return c.tcpConn.SetKeepAlivePeriod(period) +} diff --git a/internal/redirects/redirects.go b/internal/redirects/redirects.go new file mode 100644 index 000000000..48dc2c05e --- /dev/null +++ b/internal/redirects/redirects.go @@ -0,0 +1,202 @@ +// Package redirects provides functions for parsing and rewriting URLs +// according to Netlify style _redirects syntax +package redirects + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" + + netlifyRedirects "github.com/tj/go-redirects" + + "gitlab.com/gitlab-org/labkit/log" + + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +const ( + // ConfigFile is the default name of the file containing the redirect rules. + // It follows Netlify's syntax but we don't support the special options yet like splats, placeholders, query parameters + // - https://docs.netlify.com/routing/redirects/ + // - https://docs.netlify.com/routing/redirects/redirect-options/ + ConfigFile = "_redirects" + + // Check https://gitlab.com/gitlab-org/gitlab-pages/-/issues/472 before increasing this value + maxConfigSize = 64 * 1024 +) + +var ( + // ErrNoRedirect is the error thrown when a no redirect rule matches while trying to Rewrite URL. + // This means that no redirect applies to the URL and you can fallback to serving actual content instead. + ErrNoRedirect = errors.New("no redirect found") + errConfigNotFound = errors.New("_redirects file not found") + errNeedRegularFile = errors.New("_redirects needs to be a regular file (not a directory)") + errFileTooLarge = errors.New("_redirects file too large") + errFailedToOpenConfig = errors.New("unable to open _redirects file") + errFailedToParseConfig = errors.New("failed to parse _redirects file") + errFailedToParseURL = errors.New("unable to parse URL") + errNoDomainLevelRedirects = errors.New("no domain-level redirects to outside sites") + errNoStartingForwardSlashInURLPath = errors.New("url path must start with forward slash /") + errNoSplats = errors.New("splats are not supported") + errNoPlaceholders = errors.New("placeholders are not supported") + errNoParams = errors.New("params not supported") + errUnsupportedStatus = errors.New("status not supported") + errNoForce = errors.New("force! not supported") + regexpPlaceholder = regexp.MustCompile(`(?i)/:[a-z]+`) +) + +type Redirects struct { + rules []netlifyRedirects.Rule + error error +} + +// Status maps over each redirect rule and returns any error message +func (r *Redirects) Status() string { + if r.error != nil { + return fmt.Sprintf("parse error: %s", r.error.Error()) + } + + messages := make([]string, 0, len(r.rules)+1) + messages = append(messages, fmt.Sprintf("%d rules", len(r.rules))) + + for i, rule := range r.rules { + if err := validateRule(rule); err != nil { + messages = append(messages, fmt.Sprintf("rule %d: error: %s", i+1, err.Error())) + } else { + messages = append(messages, fmt.Sprintf("rule %d: valid", i+1)) + } + } + + return strings.Join(messages, "\n") +} + +func validateURL(urlText string) error { + url, err := url.Parse(urlText) + if err != nil { + return errFailedToParseURL + } + + // No support for domain-level redirects to outside sites: + // - `https://google.com` + // - `//google.com` + if url.Host != "" || url.Scheme != "" { + return errNoDomainLevelRedirects + } + + // No parent traversing relative URL's with `./` or `../` + // No ambiguous URLs like bare domains `GitLab.com` + if !strings.HasPrefix(url.Path, "/") { + return errNoStartingForwardSlashInURLPath + } + + // No support for splats, https://docs.netlify.com/routing/redirects/redirect-options/#splats + if strings.Contains(url.Path, "*") { + return errNoSplats + } + + // No support for placeholders, https://docs.netlify.com/routing/redirects/redirect-options/#placeholders + if regexpPlaceholder.MatchString(url.Path) { + return errNoPlaceholders + } + + return nil +} + +func validateRule(r netlifyRedirects.Rule) error { + if err := validateURL(r.From); err != nil { + return err + } + + if err := validateURL(r.To); err != nil { + return err + } + + // No support for query parameters, https://docs.netlify.com/routing/redirects/redirect-options/#query-parameters + if r.Params != nil { + return errNoParams + } + + // We strictly validate return status codes + switch r.Status { + case http.StatusMovedPermanently, http.StatusFound: + // noop + default: + return errUnsupportedStatus + } + + // No support for rules that use ! force + if r.Force { + return errNoForce + } + + return nil +} + +func normalizePath(path string) string { + return strings.TrimSuffix(path, "/") + "/" +} + +func (r *Redirects) match(url *url.URL) *netlifyRedirects.Rule { + for _, rule := range r.rules { + // TODO: Likely this should include host comparison once we have domain-level redirects + if normalizePath(rule.From) == normalizePath(url.Path) && validateRule(rule) == nil { + return &rule + } + } + + return nil +} + +// Rewrite takes in a URL and uses the parsed Netlify rules to rewrite +// the URL to the new location if it matches any rule +func (r *Redirects) Rewrite(url *url.URL) (*url.URL, int, error) { + rule := r.match(url) + if rule == nil { + return nil, 0, ErrNoRedirect + } + + newURL, err := url.Parse(rule.To) + log.WithFields(log.Fields{ + "url": url, + "newURL": newURL, + "err": err, + "rule.From": rule.From, + "rule.To": rule.To, + "rule.Status": rule.Status, + }).Debug("Rewrite") + return newURL, rule.Status, err +} + +// ParseRedirects decodes Netlify style redirects from the projects `.../public/_redirects` +// https://docs.netlify.com/routing/redirects/#syntax-for-the-redirects-file +func ParseRedirects(ctx context.Context, root vfs.Root) *Redirects { + fi, err := root.Lstat(ctx, ConfigFile) + if err != nil { + return &Redirects{error: errConfigNotFound} + } + + if !fi.Mode().IsRegular() { + return &Redirects{error: errNeedRegularFile} + } + + if fi.Size() > maxConfigSize { + return &Redirects{error: errFileTooLarge} + } + + reader, err := root.Open(ctx, ConfigFile) + if err != nil { + return &Redirects{error: errFailedToOpenConfig} + } + defer reader.Close() + + redirectRules, err := netlifyRedirects.Parse(reader) + if err != nil { + return &Redirects{error: errFailedToParseConfig} + } + + return &Redirects{rules: redirectRules} +} diff --git a/internal/redirects/redirects_benchmark_test.go b/internal/redirects/redirects_benchmark_test.go new file mode 100644 index 000000000..1a05a00cc --- /dev/null +++ b/internal/redirects/redirects_benchmark_test.go @@ -0,0 +1,69 @@ +package redirects + +import ( + "context" + "io/ioutil" + "net/url" + "path" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +func generateRedirectsFile(dirPath string, count int) error { + content := strings.Repeat("/goto.html /target.html 301\n", count) + content = content + "/entrance.html /exit.html 301\n" + + return ioutil.WriteFile(path.Join(dirPath, ConfigFile), []byte(content), 0600) +} + +func benchmarkRedirectsRewrite(b *testing.B, redirectsCount int) { + ctx := context.Background() + + root, tmpDir, cleanup := testhelpers.TmpDir(nil, "ParseRedirects_benchmarks") + defer cleanup() + + err := generateRedirectsFile(tmpDir, redirectsCount) + require.NoError(b, err) + + url, err := url.Parse("/entrance.html") + require.NoError(b, err) + + redirects := ParseRedirects(ctx, root) + require.NoError(b, redirects.error) + + for i := 0; i < b.N; i++ { + _, _, err := redirects.Rewrite(url) + require.NoError(b, err) + } +} + +func BenchmarkRedirectsRewrite(b *testing.B) { + b.Run("10 redirects", func(b *testing.B) { benchmarkRedirectsRewrite(b, 10) }) + b.Run("100 redirects", func(b *testing.B) { benchmarkRedirectsRewrite(b, 100) }) + b.Run("1000 redirects", func(b *testing.B) { benchmarkRedirectsRewrite(b, 1000) }) +} + +func benchmarkRedirectsParseRedirects(b *testing.B, redirectsCount int) { + ctx := context.Background() + + root, tmpDir, cleanup := testhelpers.TmpDir(nil, "ParseRedirects_benchmarks") + defer cleanup() + + err := generateRedirectsFile(tmpDir, redirectsCount) + require.NoError(b, err) + + for i := 0; i < b.N; i++ { + redirects := ParseRedirects(ctx, root) + require.NoError(b, redirects.error) + } +} + +func BenchmarkRedirectsParseRedirects(b *testing.B) { + b.Run("10 redirects", func(b *testing.B) { benchmarkRedirectsParseRedirects(b, 10) }) + b.Run("100 redirects", func(b *testing.B) { benchmarkRedirectsParseRedirects(b, 100) }) + b.Run("1000 redirects", func(b *testing.B) { benchmarkRedirectsParseRedirects(b, 1000) }) +} diff --git a/internal/redirects/redirects_test.go b/internal/redirects/redirects_test.go new file mode 100644 index 000000000..f538fc4bc --- /dev/null +++ b/internal/redirects/redirects_test.go @@ -0,0 +1,296 @@ +package redirects + +import ( + "context" + "io/ioutil" + "net/url" + "path" + "strings" + "testing" + + "github.com/stretchr/testify/require" + netlifyRedirects "github.com/tj/go-redirects" + + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +func TestRedirectsValidateUrl(t *testing.T) { + tests := []struct { + name string + url string + expectedErr string + }{ + { + name: "Valid url", + url: "/goto.html", + expectedErr: "", + }, + { + name: "No domain-level redirects", + url: "https://GitLab.com", + expectedErr: errNoDomainLevelRedirects.Error(), + }, + { + name: "No Schema-less URL domain-level redirects", + url: "//GitLab.com/pages.html", + expectedErr: errNoDomainLevelRedirects.Error(), + }, + { + name: "No bare domain-level redirects", + url: "GitLab.com", + expectedErr: errNoStartingForwardSlashInURLPath.Error(), + }, + { + name: "No parent traversing relative URL", + url: "../target.html", + expectedErr: errNoStartingForwardSlashInURLPath.Error(), + }, + { + name: "No splats", + url: "/blog/*", + expectedErr: errNoSplats.Error(), + }, + { + name: "No Placeholders", + url: "/news/:year/:month/:date/:slug", + expectedErr: errNoPlaceholders.Error(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateURL(tt.url) + if tt.expectedErr != "" { + require.EqualError(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestRedirectsValidateRule(t *testing.T) { + tests := []struct { + name string + rule string + expectedErr string + }{ + { + name: "valid rule", + rule: "/goto.html /target.html 301", + expectedErr: "", + }, + { + name: "invalid From URL", + rule: "invalid.com /teapot.html 302", + expectedErr: errNoStartingForwardSlashInURLPath.Error(), + }, + { + name: "invalid To URL", + rule: "/goto.html invalid.com", + expectedErr: errNoStartingForwardSlashInURLPath.Error(), + }, + { + name: "No parameters", + rule: "/ /something 302 foo=bar", + expectedErr: errNoParams.Error(), + }, + { + name: "Invalid status", + rule: "/goto.html /target.html 418", + expectedErr: errUnsupportedStatus.Error(), + }, + { + name: "Force not supported", + rule: "/goto.html /target.html 302!", + expectedErr: errNoForce.Error(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rules, err := netlifyRedirects.ParseString(tt.rule) + require.NoError(t, err) + + err = validateRule(rules[0]) + if tt.expectedErr != "" { + require.EqualError(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestRedirectsRewrite(t *testing.T) { + tests := []struct { + name string + url string + rule string + expectedURL string + expectedStatus int + expectedErr string + }{ + { + name: "No rules given", + url: "/no-redirect/", + rule: "", + expectedURL: "", + expectedStatus: 0, + expectedErr: ErrNoRedirect.Error(), + }, + { + name: "No matching rules", + url: "/no-redirect/", + rule: "/cake-portal.html /still-alive.html 301", + expectedURL: "", + expectedStatus: 0, + expectedErr: ErrNoRedirect.Error(), + }, + { + name: "Matching rule redirects", + url: "/cake-portal.html", + rule: "/cake-portal.html /still-alive.html 301", + expectedURL: "/still-alive.html", + expectedStatus: 301, + expectedErr: "", + }, + { + name: "Does not redirect to invalid rule", + url: "/goto.html", + rule: "/goto.html GitLab.com 301", + expectedURL: "", + expectedStatus: 0, + expectedErr: ErrNoRedirect.Error(), + }, + { + name: "Matches trailing slash rule to no trailing slash URL", + url: "/cake-portal", + rule: "/cake-portal/ /still-alive/ 301", + expectedURL: "/still-alive/", + expectedStatus: 301, + expectedErr: "", + }, + { + name: "Matches trailing slash rule to trailing slash URL", + url: "/cake-portal/", + rule: "/cake-portal/ /still-alive/ 301", + expectedURL: "/still-alive/", + expectedStatus: 301, + expectedErr: "", + }, + { + name: "Matches no trailing slash rule to no trailing slash URL", + url: "/cake-portal", + rule: "/cake-portal /still-alive 301", + expectedURL: "/still-alive", + expectedStatus: 301, + expectedErr: "", + }, + { + name: "Matches no trailing slash rule to trailing slash URL", + url: "/cake-portal/", + rule: "/cake-portal /still-alive 301", + expectedURL: "/still-alive", + expectedStatus: 301, + expectedErr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := Redirects{} + + if tt.rule != "" { + rules, err := netlifyRedirects.ParseString(tt.rule) + require.NoError(t, err) + r.rules = rules + } + + url, err := url.Parse(tt.url) + require.NoError(t, err) + + toURL, status, err := r.Rewrite(url) + + if tt.expectedURL != "" { + require.Equal(t, tt.expectedURL, toURL.String()) + } else { + require.Nil(t, toURL) + } + + require.Equal(t, tt.expectedStatus, status) + + if tt.expectedErr != "" { + require.EqualError(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestRedirectsParseRedirects(t *testing.T) { + ctx := context.Background() + + root, tmpDir, cleanup := testhelpers.TmpDir(t, "ParseRedirects_tests") + defer cleanup() + + tests := []struct { + name string + redirectsFile string + expectedRules int + expectedErr string + }{ + { + name: "No `_redirects` file present", + redirectsFile: "", + expectedRules: 0, + expectedErr: errConfigNotFound.Error(), + }, + { + name: "Everything working as expected", + redirectsFile: `/goto.html /target.html 301`, + expectedRules: 1, + expectedErr: "", + }, + { + name: "Invalid _redirects syntax gives no rules", + redirectsFile: `foobar::baz`, + expectedRules: 0, + expectedErr: "", + }, + { + name: "Config file too big", + redirectsFile: strings.Repeat("a", 2*maxConfigSize), + expectedRules: 0, + expectedErr: errFileTooLarge.Error(), + }, + // In future versions of `github.com/tj/go-redirects`, + // this may not throw a parsing error and this test could be removed + { + name: "Parsing error is caught", + redirectsFile: "/store id=:id /blog/:id 301", + expectedRules: 0, + expectedErr: errFailedToParseConfig.Error(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.redirectsFile != "" { + err := ioutil.WriteFile(path.Join(tmpDir, ConfigFile), []byte(tt.redirectsFile), 0600) + require.NoError(t, err) + } + + redirects := ParseRedirects(ctx, root) + + if tt.expectedErr != "" { + require.EqualError(t, redirects.error, tt.expectedErr) + } else { + require.NoError(t, redirects.error) + } + + require.Len(t, redirects.rules, tt.expectedRules) + }) + } +} diff --git a/internal/rejectmethods/middleware.go b/internal/rejectmethods/middleware.go new file mode 100644 index 000000000..e78a0ce59 --- /dev/null +++ b/internal/rejectmethods/middleware.go @@ -0,0 +1,31 @@ +package rejectmethods + +import ( + "net/http" + + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +var acceptedMethods = map[string]bool{ + http.MethodGet: true, + http.MethodHead: true, + http.MethodPost: true, + http.MethodPut: true, + http.MethodPatch: true, + http.MethodDelete: true, + http.MethodConnect: true, + http.MethodOptions: true, + http.MethodTrace: true, +} + +// NewMiddleware returns middleware which rejects all unknown http methods +func NewMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if acceptedMethods[r.Method] { + handler.ServeHTTP(w, r) + } else { + metrics.RejectedRequestsCount.Inc() + http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } + }) +} diff --git a/internal/rejectmethods/middleware_test.go b/internal/rejectmethods/middleware_test.go new file mode 100644 index 000000000..2921975ae --- /dev/null +++ b/internal/rejectmethods/middleware_test.go @@ -0,0 +1,43 @@ +package rejectmethods + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewMiddleware(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "OK\n") + }) + + middleware := NewMiddleware(handler) + + acceptedMethods := []string{"GET", "HEAD", "POST", "PUT", "PATCH", "CONNECT", "OPTIONS", "TRACE"} + for _, method := range acceptedMethods { + t.Run(method, func(t *testing.T) { + tmpRequest, _ := http.NewRequest(method, "/", nil) + recorder := httptest.NewRecorder() + + middleware.ServeHTTP(recorder, tmpRequest) + + result := recorder.Result() + + require.Equal(t, http.StatusOK, result.StatusCode) + }) + } + + t.Run("UNKNOWN", func(t *testing.T) { + tmpRequest, _ := http.NewRequest("UNKNOWN", "/", nil) + recorder := httptest.NewRecorder() + + middleware.ServeHTTP(recorder, tmpRequest) + + result := recorder.Result() + + require.Equal(t, http.StatusMethodNotAllowed, result.StatusCode) + }) +} diff --git a/internal/request/request.go b/internal/request/request.go new file mode 100644 index 000000000..cbda16e5c --- /dev/null +++ b/internal/request/request.go @@ -0,0 +1,57 @@ +package request + +import ( + "context" + "net" + "net/http" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" +) + +type ctxKey string + +const ( + ctxHostKey ctxKey = "host" + ctxDomainKey ctxKey = "domain" + + // SchemeHTTP name for the HTTP scheme + SchemeHTTP = "http" + // SchemeHTTPS name for the HTTPS scheme + SchemeHTTPS = "https" +) + +// IsHTTPS checks whether the request originated from HTTP or HTTPS. +// It checks the value from r.URL.Scheme +func IsHTTPS(r *http.Request) bool { + return r.URL.Scheme == SchemeHTTPS +} + +// WithHostAndDomain saves host name and domain in the request's context +func WithHostAndDomain(r *http.Request, host string, domain *domain.Domain) *http.Request { + ctx := r.Context() + ctx = context.WithValue(ctx, ctxHostKey, host) + ctx = context.WithValue(ctx, ctxDomainKey, domain) + + return r.WithContext(ctx) +} + +// GetHost extracts the host from request's context +func GetHost(r *http.Request) string { + return r.Context().Value(ctxHostKey).(string) +} + +// GetDomain extracts the domain from request's context +func GetDomain(r *http.Request) *domain.Domain { + return r.Context().Value(ctxDomainKey).(*domain.Domain) +} + +// GetHostWithoutPort returns a host without the port. The host(:port) comes +// from a Host: header if it is provided, otherwise it is a server name. +func GetHostWithoutPort(r *http.Request) string { + host, _, err := net.SplitHostPort(r.Host) + if err != nil { + return r.Host + } + + return host +} diff --git a/internal/request/request_test.go b/internal/request/request_test.go new file mode 100644 index 000000000..a9ffb223b --- /dev/null +++ b/internal/request/request_test.go @@ -0,0 +1,89 @@ +package request + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" +) + +func TestIsHTTPS(t *testing.T) { + t.Run("when scheme is http", func(t *testing.T) { + httpRequest, err := http.NewRequest("GET", "/", nil) + require.NoError(t, err) + httpRequest.URL.Scheme = SchemeHTTP + require.False(t, IsHTTPS(httpRequest)) + }) + + t.Run("when scheme is https", func(t *testing.T) { + httpsRequest, err := http.NewRequest("GET", "/", nil) + require.NoError(t, err) + httpsRequest.URL.Scheme = SchemeHTTPS + require.True(t, IsHTTPS(httpsRequest)) + }) +} + +func TestPanics(t *testing.T) { + r, err := http.NewRequest("GET", "/", nil) + require.NoError(t, err) + + require.Panics(t, func() { + GetHost(r) + }) + + require.Panics(t, func() { + GetDomain(r) + }) +} + +func TestWithHostAndDomain(t *testing.T) { + tests := []struct { + name string + host string + domain *domain.Domain + }{ + { + name: "values", + host: "gitlab.com", + domain: &domain.Domain{}, + }, + { + name: "no_host", + host: "", + domain: &domain.Domain{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r, err := http.NewRequest("GET", "/", nil) + require.NoError(t, err) + + r = WithHostAndDomain(r, tt.host, tt.domain) + require.Exactly(t, tt.domain, GetDomain(r)) + require.Equal(t, tt.host, GetHost(r)) + }) + } +} + +func TestGetHostWithoutPort(t *testing.T) { + t.Run("when port component is provided", func(t *testing.T) { + request := httptest.NewRequest("GET", "https://example.com:443", nil) + request.Host = "my.example.com:8080" + + host := GetHostWithoutPort(request) + + require.Equal(t, "my.example.com", host) + }) + + t.Run("when port component is not provided", func(t *testing.T) { + request := httptest.NewRequest("GET", "http://example.com", nil) + request.Host = "my.example.com" + + host := GetHostWithoutPort(request) + + require.Equal(t, "my.example.com", host) + }) +} diff --git a/internal/serving/disk/errors.go b/internal/serving/disk/errors.go new file mode 100644 index 000000000..5e55220be --- /dev/null +++ b/internal/serving/disk/errors.go @@ -0,0 +1,18 @@ +package disk + +type locationDirectoryError struct { + FullPath string + RelativePath string +} + +type locationFileNoExtensionError struct { + FullPath string +} + +func (l *locationDirectoryError) Error() string { + return "location error accessing directory where file expected" +} + +func (l *locationFileNoExtensionError) Error() string { + return "error accessing a path without an extension" +} diff --git a/internal/serving/disk/helpers.go b/internal/serving/disk/helpers.go new file mode 100644 index 000000000..5456724ad --- /dev/null +++ b/internal/serving/disk/helpers.go @@ -0,0 +1,101 @@ +package disk + +import ( + "context" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httputil" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +var compressedEncodings = map[string]string{ + "br": ".br", + "gzip": ".gz", +} + +// Server side content encoding priority. +// Map iteration order is not deterministic in go, so we need this array to specify the priority +// when the client doesn't provide one +var compressedEncodingsPriority = []string{ + "br", + "gzip", +} + +func endsWithSlash(path string) bool { + return strings.HasSuffix(path, "/") +} + +func endsWithoutHTMLExtension(path string) bool { + return !strings.HasSuffix(path, ".html") +} + +// Detect file's content-type either by extension or mime-sniffing. +// Implementation is adapted from Golang's `http.serveContent()` +// See https://github.com/golang/go/blob/902fc114272978a40d2e65c2510a18e870077559/src/net/http/fs.go#L194 +func (reader *Reader) detectContentType(ctx context.Context, root vfs.Root, path string) (string, error) { + contentType := mime.TypeByExtension(filepath.Ext(path)) + + if contentType == "" { + var buf [512]byte + + file, err := root.Open(ctx, path) + if err != nil { + return "", err + } + + defer file.Close() + + // Using `io.ReadFull()` because `file.Read()` may be chunked. + // Ignoring errors because we don't care if the 512 bytes cannot be read. + n, _ := io.ReadFull(file, buf[:]) + contentType = http.DetectContentType(buf[:n]) + } + + return contentType, nil +} + +func (reader *Reader) handleContentEncoding(ctx context.Context, w http.ResponseWriter, r *http.Request, root vfs.Root, fullPath string) string { + // don't accept range requests for compressed content + if r.Header.Get("Range") != "" { + return fullPath + } + + files := map[string]os.FileInfo{} + + // finding compressed files + for encoding, extension := range compressedEncodings { + path := fullPath + extension + + // Ensure the file is not a symlink + if fi, err := root.Lstat(ctx, path); err == nil && fi.Mode().IsRegular() { + files[encoding] = fi + } + } + + offers := make([]string, 0, len(files)+1) + for _, encoding := range compressedEncodingsPriority { + if _, ok := files[encoding]; ok { + offers = append(offers, encoding) + } + } + offers = append(offers, "identity") + + acceptedEncoding := httputil.NegotiateContentEncoding(r, offers) + + if fi, ok := files[acceptedEncoding]; ok { + w.Header().Set("Content-Encoding", acceptedEncoding) + + // http.ServeContent doesn't set Content-Length if Content-Encoding is set + w.Header().Set("Content-Length", strconv.FormatInt(fi.Size(), 10)) + + return fullPath + compressedEncodings[acceptedEncoding] + } + + return fullPath +} diff --git a/internal/serving/disk/local/serving.go b/internal/serving/disk/local/serving.go new file mode 100644 index 000000000..3b23b8e97 --- /dev/null +++ b/internal/serving/disk/local/serving.go @@ -0,0 +1,16 @@ +package local + +import ( + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs/local" +) + +var instance = disk.New(vfs.Instrumented(&local.VFS{})) + +// Instance returns a serving instance that is capable of reading files +// from the disk +func Instance() serving.Serving { + return instance +} diff --git a/internal/serving/disk/local/serving_test.go b/internal/serving/disk/local/serving_test.go new file mode 100644 index 000000000..2602451f1 --- /dev/null +++ b/internal/serving/disk/local/serving_test.go @@ -0,0 +1,98 @@ +package local + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +func TestDisk_ServeFileHTTP(t *testing.T) { + defer setUpTests(t)() + + tests := map[string]struct { + vfsPath string + path string + expectedStatus int + expectedBody string + }{ + "accessing /index.html": { + vfsPath: "group/serving/public", + path: "/index.html", + expectedStatus: http.StatusOK, + expectedBody: "HTML Document", + }, + "accessing /": { + vfsPath: "group/serving/public", + path: "/", + expectedStatus: http.StatusOK, + expectedBody: "HTML Document", + }, + "accessing without /": { + vfsPath: "group/serving/public", + path: "", + expectedStatus: http.StatusFound, + expectedBody: `Found.`, + }, + "accessing vfs path that is missing": { + vfsPath: "group/serving/public-missing", + path: "/index.html", + // we expect the status to not be set + expectedStatus: 0, + }, + "accessing vfs path that is forbidden (like file)": { + vfsPath: "group/serving/public/index.html", + path: "/index.html", + expectedStatus: http.StatusInternalServerError, + }, + } + + s := Instance() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w := httptest.NewRecorder() + w.Code = 0 // ensure that code is not set, and it is being set by handler + r := httptest.NewRequest("GET", "http://group.gitlab-example.com/serving"+test.path, nil) + + handler := serving.Handler{ + Writer: w, + Request: r, + LookupPath: &serving.LookupPath{ + Prefix: "/serving/", + Path: test.vfsPath, + }, + SubPath: test.path, + } + + if test.expectedStatus == 0 { + require.False(t, s.ServeFileHTTP(handler)) + require.Zero(t, w.Code, "we expect status to not be set") + return + } + + require.True(t, s.ServeFileHTTP(handler)) + + resp := w.Result() + defer resp.Body.Close() + + require.Equal(t, test.expectedStatus, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Contains(t, string(body), test.expectedBody) + }) + } +} + +var chdirSet = false + +func setUpTests(t testing.TB) func() { + t.Helper() + return testhelpers.ChdirInPath(t, "../../../../shared/pages", &chdirSet) +} diff --git a/internal/serving/disk/reader.go b/internal/serving/disk/reader.go new file mode 100644 index 000000000..12223bad4 --- /dev/null +++ b/internal/serving/disk/reader.go @@ -0,0 +1,274 @@ +package disk + +import ( + "context" + "fmt" + "io" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "gitlab.com/gitlab-org/labkit/errortracking" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httperrors" + "gitlab.com/gitlab-org/gitlab-pages/internal/redirects" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/symlink" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +// Reader is a disk access driver +type Reader struct { + fileSizeMetric *prometheus.HistogramVec + vfs vfs.VFS +} + +// Show the user some validation messages for their _redirects file +func (reader *Reader) serveRedirectsStatus(h serving.Handler, redirects *redirects.Redirects) { + h.Writer.Header().Set("Content-Type", "text/plain; charset=utf-8") + h.Writer.Header().Set("X-Content-Type-Options", "nosniff") + h.Writer.WriteHeader(http.StatusOK) + fmt.Fprintln(h.Writer, redirects.Status()) +} + +// tryRedirects returns true if it successfully handled request +func (reader *Reader) tryRedirects(h serving.Handler) bool { + ctx := h.Request.Context() + root, err := reader.vfs.Root(ctx, h.LookupPath.Path) + if vfs.IsNotExist(err) { + return false + } else if err != nil { + httperrors.Serve500WithRequest(h.Writer, h.Request, "vfs.Root", err) + return true + } + + r := redirects.ParseRedirects(ctx, root) + + rewrittenURL, status, err := r.Rewrite(h.Request.URL) + if err != nil { + if err != redirects.ErrNoRedirect { + // We assume that rewrite failure is not fatal + // and we only capture the error + errortracking.Capture(err, errortracking.WithRequest(h.Request)) + } + return false + } + + http.Redirect(h.Writer, h.Request, rewrittenURL.Path, status) + return true +} + +// tryFile returns true if it successfully handled request +func (reader *Reader) tryFile(h serving.Handler) bool { + ctx := h.Request.Context() + + root, err := reader.vfs.Root(ctx, h.LookupPath.Path) + if vfs.IsNotExist(err) { + return false + } else if err != nil { + httperrors.Serve500WithRequest(h.Writer, h.Request, + "vfs.Root", err) + return true + } + + fullPath, err := reader.resolvePath(ctx, root, h.SubPath) + + request := h.Request + urlPath := request.URL.Path + + if locationError, _ := err.(*locationDirectoryError); locationError != nil { + if endsWithSlash(urlPath) { + fullPath, err = reader.resolvePath(ctx, root, h.SubPath, "index.html") + } else { + http.Redirect(h.Writer, h.Request, redirectPath(h.Request), 302) + return true + } + } + + if locationError, _ := err.(*locationFileNoExtensionError); locationError != nil { + fullPath, err = reader.resolvePath(ctx, root, strings.TrimSuffix(h.SubPath, "/")+".html") + } + + if err != nil { + // We assume that this is mostly missing file type of the error + // and additional handlers should try to process the request + return false + } + + // Serve status of `_redirects` under `_redirects` + // We check if the final resolved path is `_redirects` after symlink traversal + if fullPath == redirects.ConfigFile { + if os.Getenv("FF_ENABLE_REDIRECTS") != "false" { + r := redirects.ParseRedirects(ctx, root) + reader.serveRedirectsStatus(h, r) + return true + } + + h.Writer.WriteHeader(http.StatusForbidden) + return true + } + + return reader.serveFile(ctx, h.Writer, h.Request, root, fullPath, h.LookupPath.HasAccessControl) +} + +func redirectPath(request *http.Request) string { + url := *request.URL + + // This ensures that path starts with `///` + url.Scheme = "" + url.Host = request.Host + url.Path = strings.TrimPrefix(url.Path, "/") + "/" + + return strings.TrimSuffix(url.String(), "?") +} + +func (reader *Reader) tryNotFound(h serving.Handler) bool { + ctx := h.Request.Context() + + root, err := reader.vfs.Root(ctx, h.LookupPath.Path) + if vfs.IsNotExist(err) { + return false + } else if err != nil { + httperrors.Serve500WithRequest(h.Writer, h.Request, "vfs.Root", err) + return true + } + + page404, err := reader.resolvePath(ctx, root, "404.html") + if err != nil { + // We assume that this is mostly missing file type of the error + // and additional handlers should try to process the request + return false + } + + err = reader.serveCustomFile(ctx, h.Writer, h.Request, http.StatusNotFound, root, page404) + if err != nil { + httperrors.Serve500WithRequest(h.Writer, h.Request, "serveCustomFile", err) + return true + } + + return true +} + +// Resolve the HTTP request to a path on disk, converting requests for +// directories to requests for index.html inside the directory if appropriate. +func (reader *Reader) resolvePath(ctx context.Context, root vfs.Root, subPath ...string) (string, error) { + // Don't use filepath.Join as cleans the path, + // where we want to traverse full path as supplied by user + // (including ..) + testPath := strings.Join(subPath, "/") + fullPath, err := symlink.EvalSymlinks(ctx, root, testPath) + + if err != nil { + if endsWithoutHTMLExtension(testPath) { + return "", &locationFileNoExtensionError{ + FullPath: fullPath, + } + } + + return "", err + } + + fi, err := root.Lstat(ctx, fullPath) + if err != nil { + return "", err + } + + // The requested path is a directory, so try index.html via recursion + if fi.IsDir() { + return "", &locationDirectoryError{ + FullPath: fullPath, + RelativePath: testPath, + } + } + + // The file exists, but is not a supported type to serve. Perhaps a block + // special device or something else that may be a security risk. + if !fi.Mode().IsRegular() { + return "", fmt.Errorf("%s: is not a regular file", fullPath) + } + + return fullPath, nil +} + +func (reader *Reader) serveFile(ctx context.Context, w http.ResponseWriter, r *http.Request, root vfs.Root, origPath string, accessControl bool) bool { + fullPath := reader.handleContentEncoding(ctx, w, r, root, origPath) + + file, err := root.Open(ctx, fullPath) + if err != nil { + httperrors.Serve500WithRequest(w, r, "root.Open", err) + return true + } + + defer file.Close() + + fi, err := root.Lstat(ctx, fullPath) + if err != nil { + httperrors.Serve500WithRequest(w, r, "root.Lstat", err) + return true + } + + if !accessControl { + // Set caching headers + w.Header().Set("Cache-Control", "max-age=600") + w.Header().Set("Expires", time.Now().Add(10*time.Minute).Format(time.RFC1123)) + } + + contentType, err := reader.detectContentType(ctx, root, origPath) + if err != nil { + httperrors.Serve500WithRequest(w, r, "detectContentType", err) + return true + } + + w.Header().Set("Content-Type", contentType) + + reader.fileSizeMetric.WithLabelValues(reader.vfs.Name()).Observe(float64(fi.Size())) + + // Support vfs.SeekableFile if available (uncompressed files) + if rs, ok := file.(vfs.SeekableFile); ok { + http.ServeContent(w, r, origPath, fi.ModTime(), rs) + } else { + // compressed files will be served by io.Copy + // TODO: Add extra headers https://gitlab.com/gitlab-org/gitlab-pages/-/issues/466 + w.Header().Set("Content-Length", strconv.FormatInt(fi.Size(), 10)) + io.Copy(w, file) + } + + return true +} + +func (reader *Reader) serveCustomFile(ctx context.Context, w http.ResponseWriter, r *http.Request, code int, root vfs.Root, origPath string) error { + fullPath := reader.handleContentEncoding(ctx, w, r, root, origPath) + + // Open and serve content of file + file, err := root.Open(ctx, fullPath) + if err != nil { + return err + } + defer file.Close() + + fi, err := root.Lstat(ctx, fullPath) + if err != nil { + return err + } + + contentType, err := reader.detectContentType(ctx, root, origPath) + if err != nil { + return err + } + + reader.fileSizeMetric.WithLabelValues(reader.vfs.Name()).Observe(float64(fi.Size())) + + w.Header().Set("Content-Type", contentType) + w.Header().Set("Content-Length", strconv.FormatInt(fi.Size(), 10)) + w.WriteHeader(code) + + if r.Method != "HEAD" { + _, err := io.CopyN(w, file, fi.Size()) + return err + } + + return nil +} diff --git a/internal/serving/disk/reader_test.go b/internal/serving/disk/reader_test.go new file mode 100644 index 000000000..53ea3d9a5 --- /dev/null +++ b/internal/serving/disk/reader_test.go @@ -0,0 +1,68 @@ +package disk + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_redirectPath(t *testing.T) { + tests := map[string]struct { + request *http.Request + expectedPath string + }{ + "simple_url_no_path": { + request: newRequest(t, "https://domain.gitlab.io"), + expectedPath: "//domain.gitlab.io/", + }, + "path_only": { + request: newRequest(t, "https://domain.gitlab.io/index.html"), + expectedPath: "//domain.gitlab.io/index.html/", + }, + "query_only": { + request: newRequest(t, "https://domain.gitlab.io?query=test"), + expectedPath: "//domain.gitlab.io/?query=test", + }, + "empty_query": { + request: newRequest(t, "https://domain.gitlab.io?"), + expectedPath: "//domain.gitlab.io/", + }, + "fragment_only": { + request: newRequest(t, "https://domain.gitlab.io#fragment"), + expectedPath: "//domain.gitlab.io/#fragment", + }, + "path_and_query": { + request: newRequest(t, "https://domain.gitlab.io/index.html?query=test"), + expectedPath: "//domain.gitlab.io/index.html/?query=test", + }, + "path_and_fragment": { + request: newRequest(t, "https://domain.gitlab.io/index.html#fragment"), + expectedPath: "//domain.gitlab.io/index.html/#fragment", + }, + "query_and_fragment": { + request: newRequest(t, "https://domain.gitlab.io?query=test#fragment"), + expectedPath: "//domain.gitlab.io/?query=test#fragment", + }, + "path_query_and_fragment": { + request: newRequest(t, "https://domain.gitlab.io/index.html?query=test#fragment"), + expectedPath: "//domain.gitlab.io/index.html/?query=test#fragment", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := redirectPath(test.request) + require.Equal(t, test.expectedPath, got) + }) + } +} + +func newRequest(t *testing.T, url string) *http.Request { + t.Helper() + + r, err := http.NewRequest("GET", url, nil) + require.NoError(t, err) + + return r +} diff --git a/internal/serving/disk/serving.go b/internal/serving/disk/serving.go new file mode 100644 index 000000000..fbcdf9f2d --- /dev/null +++ b/internal/serving/disk/serving.go @@ -0,0 +1,58 @@ +package disk + +import ( + "os" + + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/httperrors" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// Disk describes a disk access serving +type Disk struct { + reader Reader +} + +// ServeFileHTTP serves a file from disk and returns true. It returns false +// when a file could not been found. +func (s *Disk) ServeFileHTTP(h serving.Handler) bool { + if s.reader.tryFile(h) { + return true + } + + if os.Getenv("FF_ENABLE_REDIRECTS") != "false" { + if s.reader.tryRedirects(h) { + return true + } + } + + return false +} + +// ServeNotFoundHTTP tries to read a custom 404 page +func (s *Disk) ServeNotFoundHTTP(h serving.Handler) { + if s.reader.tryNotFound(h) { + return + } + + // Generic 404 + httperrors.Serve404(h.Writer) +} + +// Reconfigure VFS +func (s *Disk) Reconfigure(cfg *config.Config) error { + return s.reader.vfs.Reconfigure(cfg) +} + +// New returns a serving instance that is capable of reading files +// from the VFS +func New(vfs vfs.VFS) serving.Serving { + return &Disk{ + reader: Reader{ + fileSizeMetric: metrics.DiskServingFileSize, + vfs: vfs, + }, + } +} diff --git a/internal/serving/disk/symlink/LICENSE b/internal/serving/disk/symlink/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/internal/serving/disk/symlink/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/serving/disk/symlink/PATENTS b/internal/serving/disk/symlink/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/internal/serving/disk/symlink/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/internal/serving/disk/symlink/README.md b/internal/serving/disk/symlink/README.md new file mode 100644 index 000000000..2b3678c29 --- /dev/null +++ b/internal/serving/disk/symlink/README.md @@ -0,0 +1,7 @@ +# Symlink code extracted from Go 1.14 stdlib + +This directory contains part of the Go standard library +`filepath.EvalSymlinks` code. It was vendored from Go 1.14.6. + +- `symlink.go` is based on https://github.com/golang/go/blob/go1.14.6/src/path/filepath/symlink.go +- `path_test.go` is based on https://github.com/golang/go/blob/go1.14.6/src/path/filepath/path_test.go#L768-L1000 diff --git a/internal/serving/disk/symlink/path_test.go b/internal/serving/disk/symlink/path_test.go new file mode 100644 index 000000000..ef9726c12 --- /dev/null +++ b/internal/serving/disk/symlink/path_test.go @@ -0,0 +1,195 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package symlink_test + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/symlink" + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs/local" +) + +var fs = vfs.Instrumented(&local.VFS{}) + +type EvalSymlinksTest struct { + // If dest is empty, the path is created; otherwise the dest is symlinked to the path. + path, dest string +} + +var EvalSymlinksTestDirs = []EvalSymlinksTest{ + {"test", ""}, + {"test/dir", ""}, + {"test/dir/link3", "../../"}, + {"test/link1", "../test"}, + {"test/link2", "dir"}, + {"test/linkabs", "/"}, + {"test/link4", "../test2"}, + {"test2", "test/dir"}, + // Issue 23444. + {"src", ""}, + {"src/pool", ""}, + {"src/pool/test", ""}, + {"src/versions", ""}, + {"src/versions/current", "../../version"}, + {"src/versions/v1", ""}, + {"src/versions/v1/modules", ""}, + {"src/versions/v1/modules/test", "../../../pool/test"}, + {"version", "src/versions/v1"}, +} + +var EvalSymlinksTests = []EvalSymlinksTest{ + {"test", "test"}, + {"test/dir", "test/dir"}, + {"test/dir/../..", "."}, + {"test/link1", "test"}, + {"test/link2", "test/dir"}, + {"test/link1/dir", "test/dir"}, + {"test/link2/..", "test"}, + {"test/dir/link3", "."}, + {"test/link2/link3/test", "test"}, + {"test/linkabs", "/"}, + {"test/link4/..", "test"}, + {"src/versions/current/modules/test", "src/pool/test"}, +} + +// simpleJoin builds a file name from the directory and path. +// It does not use Join because we don't want ".." to be evaluated. +func simpleJoin(path ...string) string { + return strings.Join(path, string(filepath.Separator)) +} + +func testEvalSymlinks(t *testing.T, wd, path, want string) { + root, err := fs.Root(context.Background(), wd) + require.NoError(t, err) + + have, err := symlink.EvalSymlinks(context.Background(), root, path) + if err != nil { + t.Errorf("EvalSymlinks(%q) error: %v", path, err) + return + } + if filepath.Clean(have) != filepath.Clean(want) { + t.Errorf("EvalSymlinks(%q) returns %q, want %q", path, have, want) + } +} + +func TestEvalSymlinks(t *testing.T) { + _, tmpDir, cleanup := testhelpers.TmpDir(t, "symlink_tests") + defer cleanup() + + // Create the symlink farm using relative paths. + for _, d := range EvalSymlinksTestDirs { + var err error + path := simpleJoin(tmpDir, d.path) + if d.dest == "" { + err = os.Mkdir(path, 0755) + } else { + err = os.Symlink(d.dest, path) + } + if err != nil { + t.Fatal(err) + } + } + + // Evaluate the symlink farm. + for _, test := range EvalSymlinksTests { + testEvalSymlinks(t, tmpDir, test.path, test.dest) + + // test EvalSymlinks(".") + testEvalSymlinks(t, simpleJoin(tmpDir, test.path), ".", ".") + + // test EvalSymlinks("C:.") on Windows + if runtime.GOOS == "windows" { + volDot := filepath.VolumeName(tmpDir) + "." + testEvalSymlinks(t, simpleJoin(tmpDir, test.path), volDot, volDot) + } + + // test EvalSymlinks(".."+path) + testEvalSymlinks(t, + tmpDir, + simpleJoin("test", "..", test.path), + test.dest) + } +} + +func TestEvalSymlinksIsNotExist(t *testing.T) { + root, _, cleanup := testhelpers.TmpDir(t, "symlink_tests") + defer cleanup() + + _, err := symlink.EvalSymlinks(context.Background(), root, "notexist") + if !os.IsNotExist(err) { + t.Errorf("expected the file is not found, got %v\n", err) + } + + err = os.Symlink("notexist", "link") + if err != nil { + t.Fatal(err) + } + defer os.Remove("link") + + _, err = symlink.EvalSymlinks(context.Background(), root, "link") + if !os.IsNotExist(err) { + t.Errorf("expected the file is not found, got %v\n", err) + } +} + +func TestIssue13582(t *testing.T) { + root, tmpDir, cleanup := testhelpers.TmpDir(t, "symlink_tests") + defer cleanup() + + dir := filepath.Join(tmpDir, "dir") + err := os.Mkdir(dir, 0755) + if err != nil { + t.Fatal(err) + } + linkToDir := filepath.Join(tmpDir, "link_to_dir") + err = os.Symlink(dir, linkToDir) + if err != nil { + t.Fatal(err) + } + file := filepath.Join(linkToDir, "file") + err = ioutil.WriteFile(file, nil, 0644) + if err != nil { + t.Fatal(err) + } + link1 := filepath.Join(linkToDir, "link1") + err = os.Symlink(file, link1) + if err != nil { + t.Fatal(err) + } + link2 := filepath.Join(linkToDir, "link2") + err = os.Symlink(link1, link2) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + path, want string + }{ + {"dir", "dir"}, + {"link_to_dir", "dir"}, + {"link_to_dir/file", "dir/file"}, + {"link_to_dir/link1", "dir/file"}, + {"link_to_dir/link2", "dir/file"}, + } + for i, test := range tests { + have, err := symlink.EvalSymlinks(context.Background(), root, test.path) + if err != nil { + t.Fatal(err) + } + if have != test.want { + t.Errorf("test#%d: EvalSymlinks(%q) returns %q, want %q", i, test.path, have, test.want) + } + } +} diff --git a/internal/serving/disk/symlink/shims.go b/internal/serving/disk/symlink/shims.go new file mode 100644 index 000000000..90f67d45e --- /dev/null +++ b/internal/serving/disk/symlink/shims.go @@ -0,0 +1,17 @@ +package symlink + +import ( + "context" + "path/filepath" + + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +func volumeNameLen(s string) int { return 0 } + +func IsAbs(path string) bool { return filepath.IsAbs(path) } +func Clean(path string) string { return filepath.Clean(path) } + +func EvalSymlinks(ctx context.Context, root vfs.Root, path string) (string, error) { + return walkSymlinks(ctx, root, path) +} diff --git a/internal/serving/disk/symlink/symlink.go b/internal/serving/disk/symlink/symlink.go new file mode 100644 index 000000000..3b5d242ae --- /dev/null +++ b/internal/serving/disk/symlink/symlink.go @@ -0,0 +1,153 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package symlink + +import ( + "context" + "errors" + "os" + "runtime" + "syscall" + + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +func walkSymlinks(ctx context.Context, root vfs.Root, path string) (string, error) { + volLen := volumeNameLen(path) + pathSeparator := string(os.PathSeparator) + + if volLen < len(path) && os.IsPathSeparator(path[volLen]) { + volLen++ + } + vol := path[:volLen] + dest := vol + linksWalked := 0 + for start, end := volLen, volLen; start < len(path); start = end { + for start < len(path) && os.IsPathSeparator(path[start]) { + start++ + } + end = start + for end < len(path) && !os.IsPathSeparator(path[end]) { + end++ + } + + // On Windows, "." can be a symlink. + // We look it up, and use the value if it is absolute. + // If not, we just return ".". + isWindowsDot := runtime.GOOS == "windows" && path[volumeNameLen(path):] == "." + + // The next path component is in path[start:end]. + if end == start { + // No more path components. + break + } else if path[start:end] == "." && !isWindowsDot { + // Ignore path component ".". + continue + } else if path[start:end] == ".." { + // Back up to previous component if possible. + // Note that volLen includes any leading slash. + + // Set r to the index of the last slash in dest, + // after the volume. + var r int + for r = len(dest) - 1; r >= volLen; r-- { + if os.IsPathSeparator(dest[r]) { + break + } + } + + if r >= 0 && r+1 == volLen && os.IsPathSeparator(dest[r]) { + return "", errors.New("EvalSymlinks: cannot backtrack root path") + } else if r < volLen || dest[r+1:] == ".." { + // Either path has no slashes + // (it's empty or just "C:") + // or it ends in a ".." we had to keep. + // Either way, keep this "..". + if len(dest) > volLen { + dest += pathSeparator + } + dest += ".." + } else { + // Discard everything since the last slash. + dest = dest[:r] + } + continue + } + + // Ordinary path component. Add it to result. + + if len(dest) > volumeNameLen(dest) && !os.IsPathSeparator(dest[len(dest)-1]) { + dest += pathSeparator + } + + dest += path[start:end] + + // Resolve symlink. + + fi, err := root.Lstat(ctx, dest) + if err != nil { + return "", err + } + + if fi.Mode()&os.ModeSymlink == 0 { + if !fi.Mode().IsDir() && end < len(path) { + return "", syscall.ENOTDIR + } + continue + } + + // Found symlink. + + linksWalked++ + if linksWalked > 255 { + return "", errors.New("EvalSymlinks: too many links") + } + + link, err := root.Readlink(ctx, dest) + if err != nil { + return "", err + } + + if isWindowsDot && !IsAbs(link) { + // On Windows, if "." is a relative symlink, + // just return ".". + break + } + + path = link + path[end:] + + v := volumeNameLen(link) + if v > 0 { + // Symlink to drive name is an absolute path. + if v < len(link) && os.IsPathSeparator(link[v]) { + v++ + } + vol = link[:v] + dest = vol + end = len(vol) + } else if len(link) > 0 && os.IsPathSeparator(link[0]) { + // Symlink to absolute path. + dest = link[:1] + end = 1 + } else { + // Symlink to relative path; replace last + // path component in dest. + var r int + for r = len(dest) - 1; r >= volLen; r-- { + if os.IsPathSeparator(dest[r]) { + break + } + } + if r < volLen { + dest = vol + } else { + dest = dest[:r] + } + end = 0 + } + } + + return Clean(dest), nil +} diff --git a/internal/serving/disk/zip/serving.go b/internal/serving/disk/zip/serving.go new file mode 100644 index 000000000..6db0be10d --- /dev/null +++ b/internal/serving/disk/zip/serving.go @@ -0,0 +1,17 @@ +package zip + +import ( + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs/zip" +) + +var instance = disk.New(vfs.Instrumented(zip.New(&config.ZipServing{}))) + +// Instance returns a serving instance that is capable of reading files +// from a zip archives opened from a URL, most likely stored in object storage +func Instance() serving.Serving { + return instance +} diff --git a/internal/serving/disk/zip/serving_test.go b/internal/serving/disk/zip/serving_test.go new file mode 100644 index 000000000..e64a761a1 --- /dev/null +++ b/internal/serving/disk/zip/serving_test.go @@ -0,0 +1,128 @@ +package zip + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +func TestZip_ServeFileHTTP(t *testing.T) { + testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public-without-dirs.zip") + defer cleanup() + + tests := map[string]struct { + vfsPath string + path string + expectedStatus int + expectedBody string + }{ + "accessing /index.html": { + vfsPath: testServerURL + "/public.zip", + path: "/index.html", + expectedStatus: http.StatusOK, + expectedBody: "zip.gitlab.io/project/index.html\n", + }, + "accessing /": { + vfsPath: testServerURL + "/public.zip", + path: "/", + expectedStatus: http.StatusOK, + expectedBody: "zip.gitlab.io/project/index.html\n", + }, + "accessing without /": { + vfsPath: testServerURL + "/public.zip", + path: "", + expectedStatus: http.StatusFound, + expectedBody: `Found.`, + }, + "accessing archive that is 404": { + vfsPath: testServerURL + "/invalid.zip", + path: "/index.html", + // we expect the status to not be set + expectedStatus: 0, + }, + "accessing archive that is 500": { + vfsPath: testServerURL + "/500", + path: "/index.html", + expectedStatus: http.StatusInternalServerError, + }, + } + + cfg := &config.Config{ + Zip: &config.ZipServing{ + ExpirationInterval: 10 * time.Second, + CleanupInterval: 5 * time.Second, + RefreshInterval: 5 * time.Second, + OpenTimeout: 5 * time.Second, + }, + } + + s := Instance() + err := s.Reconfigure(cfg) + require.NoError(t, err) + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w := httptest.NewRecorder() + w.Code = 0 // ensure that code is not set, and it is being set by handler + r := httptest.NewRequest("GET", "http://zip.gitlab.io/zip"+test.path, nil) + + handler := serving.Handler{ + Writer: w, + Request: r, + LookupPath: &serving.LookupPath{ + Prefix: "/zip/", + Path: test.vfsPath, + }, + SubPath: test.path, + } + + if test.expectedStatus == 0 { + require.False(t, s.ServeFileHTTP(handler)) + require.Zero(t, w.Code, "we expect status to not be set") + return + } + + require.True(t, s.ServeFileHTTP(handler)) + + resp := w.Result() + defer resp.Body.Close() + + require.Equal(t, test.expectedStatus, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Contains(t, string(body), test.expectedBody) + }) + } +} + +var chdirSet = false + +func newZipFileServerURL(t *testing.T, zipFilePath string) (string, func()) { + t.Helper() + + chdir := testhelpers.ChdirInPath(t, "../../../../shared/pages", &chdirSet) + + m := http.NewServeMux() + m.HandleFunc("/public.zip", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.ServeFile(w, r, zipFilePath) + })) + m.HandleFunc("/500", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + + testServer := httptest.NewServer(m) + + return testServer.URL, func() { + chdir() + testServer.Close() + } +} diff --git a/internal/serving/handler.go b/internal/serving/handler.go new file mode 100644 index 000000000..a0d66ecbf --- /dev/null +++ b/internal/serving/handler.go @@ -0,0 +1,12 @@ +package serving + +import "net/http" + +// Handler aggregates response/request and lookup path + subpath needed to +// handle a request and response. +type Handler struct { + Writer http.ResponseWriter + Request *http.Request + LookupPath *LookupPath + SubPath string +} diff --git a/internal/serving/lookup_path.go b/internal/serving/lookup_path.go new file mode 100644 index 000000000..1aefe1b85 --- /dev/null +++ b/internal/serving/lookup_path.go @@ -0,0 +1,12 @@ +package serving + +// LookupPath holds a domain project configuration needed to handle a request +type LookupPath struct { + ServingType string // Serving type being used, like `zip` + Prefix string // Project prefix, for example, /my/project in group.gitlab.io/my/project/index.html + Path string // Path is an internal and serving-specific location of a document + IsNamespaceProject bool // IsNamespaceProject is DEPRECATED, see https://gitlab.com/gitlab-org/gitlab-pages/issues/272 + IsHTTPSOnly bool + HasAccessControl bool + ProjectID uint64 +} diff --git a/internal/serving/request.go b/internal/serving/request.go new file mode 100644 index 000000000..694d0df4f --- /dev/null +++ b/internal/serving/request.go @@ -0,0 +1,35 @@ +package serving + +import "net/http" + +// Request is a type that aggregates a serving itself, project lookup path and +// a request subpath based on an incoming request to serve page. +type Request struct { + Serving Serving // Serving chosen to serve this request + LookupPath *LookupPath // LookupPath contains pages project details + SubPath string // Subpath is a URL path subcomponent for this request +} + +// ServeFileHTTP forwards serving request handler to the serving itself +func (s *Request) ServeFileHTTP(w http.ResponseWriter, r *http.Request) bool { + handler := Handler{ + Writer: w, + Request: r, + LookupPath: s.LookupPath, + SubPath: s.SubPath, + } + + return s.Serving.ServeFileHTTP(handler) +} + +// ServeNotFoundHTTP forwards serving request handler to the serving itself +func (s *Request) ServeNotFoundHTTP(w http.ResponseWriter, r *http.Request) { + handler := Handler{ + Writer: w, + Request: r, + LookupPath: s.LookupPath, + SubPath: s.SubPath, + } + + s.Serving.ServeNotFoundHTTP(handler) +} diff --git a/internal/serving/serverless/certs.go b/internal/serving/serverless/certs.go new file mode 100644 index 000000000..674e8b258 --- /dev/null +++ b/internal/serving/serverless/certs.go @@ -0,0 +1,26 @@ +package serverless + +import ( + "crypto/tls" + "crypto/x509" +) + +// Certs holds definition of certificates we use to perform mTLS +// handshake with a cluster +type Certs struct { + RootCerts *x509.CertPool + Certificate tls.Certificate +} + +// NewClusterCerts creates a new cluster configuration from cert / key pair +func NewClusterCerts(clientCert, clientKey string) (*Certs, error) { + cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) + if err != nil { + return nil, err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM([]byte(clientCert)) + + return &Certs{RootCerts: caCertPool, Certificate: cert}, nil +} diff --git a/internal/serving/serverless/cluster.go b/internal/serving/serverless/cluster.go new file mode 100644 index 000000000..6bdd51da7 --- /dev/null +++ b/internal/serving/serverless/cluster.go @@ -0,0 +1,28 @@ +package serverless + +import ( + "crypto/tls" + "strings" +) + +// Cluster represent a Knative cluster that we want to proxy requests to +type Cluster struct { + Address string // Address is a real IP address of a cluster ingress + Port string // Port is a real port of HTTP TLS service + Name string // Name is a cluster name, used in cluster certificates + Certs *Certs +} + +// Host returns a real cluster location based on IP address and port +func (c Cluster) Host() string { + return strings.Join([]string{c.Address, c.Port}, ":") +} + +// TLSConfig builds a new tls.Config and returns a pointer to it +func (c Cluster) TLSConfig() *tls.Config { + return &tls.Config{ + Certificates: []tls.Certificate{c.Certs.Certificate}, + RootCAs: c.Certs.RootCerts, + ServerName: c.Name, + } +} diff --git a/internal/serving/serverless/director.go b/internal/serving/serverless/director.go new file mode 100644 index 000000000..3f1bc99a7 --- /dev/null +++ b/internal/serving/serverless/director.go @@ -0,0 +1,20 @@ +package serverless + +import ( + "net/http" + + "github.com/tomasen/realip" +) + +// NewDirectorFunc returns a director function capable of configuring a proxy +// request +func NewDirectorFunc(service string) func(*http.Request) { + return func(request *http.Request) { + request.Host = service + request.URL.Host = service + request.URL.Scheme = "https" + request.Header.Set("User-Agent", "GitLab Pages Daemon") + request.Header.Set("X-Forwarded-For", realip.FromRequest(request)) + request.Header.Set("X-Forwarded-Proto", "https") + } +} diff --git a/internal/serving/serverless/errors.go b/internal/serving/serverless/errors.go new file mode 100644 index 000000000..d208a11df --- /dev/null +++ b/internal/serving/serverless/errors.go @@ -0,0 +1,26 @@ +package serverless + +import ( + "encoding/json" + "net/http" +) + +// NewErrorHandler returns a func(http.ResponseWriter, *http.Request, error) +// responsible for handling proxy errors +func NewErrorHandler() func(http.ResponseWriter, *http.Request, error) { + return func(w http.ResponseWriter, r *http.Request, err error) { + w.WriteHeader(http.StatusInternalServerError) + + message := "cluster error: " + err.Error() + msgmap := map[string]string{"error": message} + + json, err := json.Marshal(msgmap) + if err != nil { + w.Write([]byte(message)) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(json) + } +} diff --git a/internal/serving/serverless/serverless.go b/internal/serving/serverless/serverless.go new file mode 100644 index 000000000..f8bd4e87b --- /dev/null +++ b/internal/serving/serverless/serverless.go @@ -0,0 +1,73 @@ +package serverless + +import ( + "errors" + "net/http/httputil" + + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/httperrors" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// Serverless is a servering used to proxy requests between a client and +// Knative cluster. +type Serverless struct { + proxy *httputil.ReverseProxy +} + +// NewFromAPISource returns a serverless serving instance built from GitLab API +// response +func NewFromAPISource(config api.Serverless) (serving.Serving, error) { + if len(config.Service) == 0 { + return nil, errors.New("incomplete serverless serving config") + } + + certs, err := NewClusterCerts( + config.Cluster.CertificateCert, + config.Cluster.CertificateKey, + ) + if err != nil { + return nil, err + } + + cluster := Cluster{ + Name: config.Cluster.Hostname, + Address: config.Cluster.Address, + Port: config.Cluster.Port, + Certs: certs, + } + + return New(config.Service, cluster), nil +} + +// New returns a new serving instance +func New(service string, cluster Cluster) serving.Serving { + proxy := httputil.ReverseProxy{ + Director: NewDirectorFunc(service), + Transport: NewTransport(cluster), + ErrorHandler: NewErrorHandler(), + } + + return &Serverless{proxy: &proxy} +} + +// ServeFileHTTP handle an incoming request and proxies it to Knative cluster +func (s *Serverless) ServeFileHTTP(h serving.Handler) bool { + metrics.ServerlessRequests.Inc() + + s.proxy.ServeHTTP(h.Writer, h.Request) + + return true +} + +// ServeNotFoundHTTP responds with 404 +func (s *Serverless) ServeNotFoundHTTP(h serving.Handler) { + httperrors.Serve404(h.Writer) +} + +// Reconfigure noop +func (s *Serverless) Reconfigure(*config.Config) error { + return nil +} diff --git a/internal/serving/serverless/serverless_test.go b/internal/serving/serverless/serverless_test.go new file mode 100644 index 000000000..ebd143432 --- /dev/null +++ b/internal/serving/serverless/serverless_test.go @@ -0,0 +1,165 @@ +package serverless + +import ( + "crypto/tls" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/fixture" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" +) + +func withTestCluster(t *testing.T, cert, key string, block func(*http.ServeMux, *url.URL, *Certs)) { + mux := http.NewServeMux() + cluster := httptest.NewUnstartedServer(mux) + + certs, err := NewClusterCerts(fixture.Certificate, fixture.Key) + require.NoError(t, err) + + cluster.TLS = &tls.Config{ + Certificates: []tls.Certificate{certs.Certificate}, + RootCAs: certs.RootCerts, + } + + cluster.StartTLS() + defer cluster.Close() + + address, err := url.Parse(cluster.URL) + require.NoError(t, err) + + block(mux, address, certs) +} + +func TestServeFileHTTP(t *testing.T) { + t.Run("when proxying simple request to a cluster", func(t *testing.T) { + withTestCluster(t, fixture.Certificate, fixture.Key, func(mux *http.ServeMux, server *url.URL, certs *Certs) { + serverless := New( + "my-func.my-namespace-123.knative.example.com", + Cluster{ + Name: "knative.gitlab-example.com", + Address: server.Hostname(), + Port: server.Port(), + Certs: certs, + }, + ) + + writer := httptest.NewRecorder() + request := httptest.NewRequest("GET", "http://example.gitlab.com/", nil) + handler := serving.Handler{Writer: writer, Request: request} + request.Header.Set("X-Real-IP", "127.0.0.105") + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "my-func.my-namespace-123.knative.example.com", r.Host) + require.Equal(t, "GitLab Pages Daemon", r.Header.Get("User-Agent")) + require.Equal(t, "https", r.Header.Get("X-Forwarded-Proto")) + require.Contains(t, r.Header.Get("X-Forwarded-For"), "127.0.0.105") + }) + + served := serverless.ServeFileHTTP(handler) + result := writer.Result() + + require.True(t, served) + require.Equal(t, http.StatusOK, result.StatusCode) + }) + }) + + t.Run("when proxying request with invalid hostname", func(t *testing.T) { + withTestCluster(t, fixture.Certificate, fixture.Key, func(mux *http.ServeMux, server *url.URL, certs *Certs) { + serverless := New( + "my-func.my-namespace-123.knative.example.com", + Cluster{ + Name: "knative.invalid-gitlab-example.com", + Address: server.Hostname(), + Port: server.Port(), + Certs: certs, + }, + ) + + writer := httptest.NewRecorder() + request := httptest.NewRequest("GET", "http://example.gitlab.com/", nil) + handler := serving.Handler{Writer: writer, Request: request} + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + }) + + served := serverless.ServeFileHTTP(handler) + result := writer.Result() + body, err := ioutil.ReadAll(writer.Body) + require.NoError(t, err) + + require.True(t, served) + require.Equal(t, http.StatusInternalServerError, result.StatusCode) + require.Contains(t, string(body), "cluster error: x509: certificate") + }) + }) + + t.Run("when a cluster responds with an error", func(t *testing.T) { + withTestCluster(t, fixture.Certificate, fixture.Key, func(mux *http.ServeMux, server *url.URL, certs *Certs) { + serverless := New( + "my-func.my-namespace-123.knative.example.com", + Cluster{ + Name: "knative.gitlab-example.com", + Address: server.Hostname(), + Port: server.Port(), + Certs: certs, + }, + ) + + writer := httptest.NewRecorder() + request := httptest.NewRequest("GET", "http://example.gitlab.com/", nil) + handler := serving.Handler{Writer: writer, Request: request} + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + w.Write([]byte("sorry, service unavailable")) + }) + + served := serverless.ServeFileHTTP(handler) + result := writer.Result() + body, err := ioutil.ReadAll(writer.Body) + require.NoError(t, err) + + require.True(t, served) + require.Equal(t, http.StatusServiceUnavailable, result.StatusCode) + require.Contains(t, string(body), "sorry, service unavailable") + }) + }) + + t.Run("when a cluster responds correctly", func(t *testing.T) { + withTestCluster(t, fixture.Certificate, fixture.Key, func(mux *http.ServeMux, server *url.URL, certs *Certs) { + serverless := New( + "my-func.my-namespace-123.knative.example.com", + Cluster{ + Name: "knative.gitlab-example.com", + Address: server.Hostname(), + Port: server.Port(), + Certs: certs, + }, + ) + + writer := httptest.NewRecorder() + request := httptest.NewRequest("GET", "http://example.gitlab.com/", nil) + handler := serving.Handler{Writer: writer, Request: request} + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte("OK")) + }) + + served := serverless.ServeFileHTTP(handler) + result := writer.Result() + body, err := ioutil.ReadAll(writer.Body) + require.NoError(t, err) + + require.True(t, served) + require.Equal(t, http.StatusOK, result.StatusCode) + require.Contains(t, string(body), "OK") + }) + }) +} diff --git a/internal/serving/serverless/transport.go b/internal/serving/serverless/transport.go new file mode 100644 index 000000000..b7fabb13b --- /dev/null +++ b/internal/serving/serverless/transport.go @@ -0,0 +1,51 @@ +package serverless + +import ( + "context" + "net" + "net/http" + "time" + + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// Transport is a struct that handle the proxy connection round trip to Knative +// cluster +type Transport struct { + cluster Cluster + transport *http.Transport +} + +// NewTransport fabricates as new transport type +func NewTransport(cluster Cluster) *Transport { + dialer := net.Dialer{ + Timeout: 4 * time.Minute, + KeepAlive: 6 * time.Minute, + } + + dialContext := func(ctx context.Context, network, address string) (net.Conn, error) { + address = cluster.Host() + + return dialer.DialContext(ctx, network, address) + } + + return &Transport{ + cluster: cluster, + transport: &http.Transport{ + DialContext: dialContext, + TLSHandshakeTimeout: 5 * time.Second, + TLSClientConfig: cluster.TLSConfig(), + }, + } +} + +// RoundTrip performs a connection to a Knative cluster and returns a response +func (t *Transport) RoundTrip(request *http.Request) (*http.Response, error) { + start := time.Now() + + response, err := t.transport.RoundTrip(request) + + metrics.ServerlessLatency.Observe(time.Since(start).Seconds()) + + return response, err +} diff --git a/internal/serving/serving.go b/internal/serving/serving.go new file mode 100644 index 000000000..786ee569e --- /dev/null +++ b/internal/serving/serving.go @@ -0,0 +1,10 @@ +package serving + +import "gitlab.com/gitlab-org/gitlab-pages/internal/config" + +// Serving is an interface used to define a serving driver +type Serving interface { + ServeFileHTTP(Handler) bool + ServeNotFoundHTTP(Handler) + Reconfigure(config *config.Config) error +} diff --git a/internal/source/config.go b/internal/source/config.go new file mode 100644 index 000000000..9cf87bc65 --- /dev/null +++ b/internal/source/config.go @@ -0,0 +1,7 @@ +package source + +import "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/client" + +// Config represents an interface that is configuration provider for client +// capable of comunicating with GitLab +type Config client.Config diff --git a/internal/source/disk/config.go b/internal/source/disk/config.go new file mode 100644 index 000000000..d2e6c123b --- /dev/null +++ b/internal/source/disk/config.go @@ -0,0 +1,57 @@ +package disk + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" +) + +// DomainConfig represents a custom domain config +type domainConfig struct { + Domain string + Certificate string + Key string + HTTPSOnly bool `json:"https_only"` + ID uint64 `json:"id"` + AccessControl bool `json:"access_control"` +} + +// MultiDomainConfig represents a group of custom domain configs +type multiDomainConfig struct { + Domains []domainConfig + HTTPSOnly bool `json:"https_only"` + ID uint64 `json:"id"` + AccessControl bool `json:"access_control"` +} + +// ProjectConfig is a project-level configuration +type projectConfig struct { + NamespaceProject bool + HTTPSOnly bool + AccessControl bool + ID uint64 +} + +// Valid validates a custom domain config for a root domain +func (c *domainConfig) Valid(rootDomain string) bool { + if c.Domain == "" { + return false + } + + // TODO: better sanitize domain + domain := strings.ToLower(c.Domain) + rootDomain = "." + rootDomain + return !strings.HasSuffix(domain, rootDomain) +} + +// Read reads a multi domain config and decodes it from a `config.json` +func (c *multiDomainConfig) Read(group, project string) error { + configFile, err := os.Open(filepath.Join(group, project, "config.json")) + if err != nil { + return err + } + defer configFile.Close() + + return json.NewDecoder(configFile).Decode(c) +} diff --git a/internal/source/disk/config_test.go b/internal/source/disk/config_test.go new file mode 100644 index 000000000..1bb2364ab --- /dev/null +++ b/internal/source/disk/config_test.go @@ -0,0 +1,65 @@ +package disk + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +const configFile = "test-group/test-project/config.json" +const invalidConfig = `{"Domains":{}}` +const validConfig = `{"Domains":[{"Domain":"test"}]}` + +func TestDomainConfigValidness(t *testing.T) { + d := domainConfig{} + require.False(t, d.Valid("gitlab.io")) + + d = domainConfig{Domain: "test"} + require.True(t, d.Valid("gitlab.io")) + + d = domainConfig{Domain: "test"} + require.True(t, d.Valid("gitlab.io")) + + d = domainConfig{Domain: "test.gitlab.io"} + require.False(t, d.Valid("gitlab.io")) + + d = domainConfig{Domain: "test.test.gitlab.io"} + require.False(t, d.Valid("gitlab.io")) + + d = domainConfig{Domain: "test.testgitlab.io"} + require.True(t, d.Valid("gitlab.io")) + + d = domainConfig{Domain: "test.GitLab.Io"} + require.False(t, d.Valid("gitlab.io")) +} + +func TestDomainConfigRead(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + d := multiDomainConfig{} + err := d.Read("test-group", "test-project") + require.Error(t, err) + + os.MkdirAll(filepath.Dir(configFile), 0700) + defer os.RemoveAll("test-group") + + d = multiDomainConfig{} + err = d.Read("test-group", "test-project") + require.Error(t, err) + + err = ioutil.WriteFile(configFile, []byte(invalidConfig), 0600) + require.NoError(t, err) + d = multiDomainConfig{} + err = d.Read("test-group", "test-project") + require.Error(t, err) + + err = ioutil.WriteFile(configFile, []byte(validConfig), 0600) + require.NoError(t, err) + d = multiDomainConfig{} + err = d.Read("test-group", "test-project") + require.NoError(t, err) +} diff --git a/internal/source/disk/custom.go b/internal/source/disk/custom.go new file mode 100644 index 000000000..037abfeee --- /dev/null +++ b/internal/source/disk/custom.go @@ -0,0 +1,37 @@ +package disk + +import ( + "net/http" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/local" +) + +type customProjectResolver struct { + config *domainConfig + + path string +} + +func (p *customProjectResolver) Resolve(r *http.Request) (*serving.Request, error) { + if p.config == nil { + return nil, domain.ErrDomainDoesNotExist + } + + lookupPath := &serving.LookupPath{ + ServingType: "file", + Prefix: "/", + Path: p.path, + IsNamespaceProject: false, + IsHTTPSOnly: p.config.HTTPSOnly, + HasAccessControl: p.config.AccessControl, + ProjectID: p.config.ID, + } + + return &serving.Request{ + Serving: local.Instance(), + LookupPath: lookupPath, + SubPath: r.URL.Path, + }, nil +} diff --git a/internal/source/disk/disk.go b/internal/source/disk/disk.go new file mode 100644 index 000000000..272d6c4ee --- /dev/null +++ b/internal/source/disk/disk.go @@ -0,0 +1,55 @@ +package disk + +import ( + "strings" + "sync" + "time" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" +) + +// Disk struct represents a map of all domains supported by pages that are +// stored on a disk with corresponding `config.json`. +type Disk struct { + dm Map + lock *sync.RWMutex +} + +// New is a factory method for the Disk source. It is initializing a mutex. It +// should not initialize `dm` as we later check the readiness by comparing it +// with a nil value. +func New() *Disk { + return &Disk{ + lock: &sync.RWMutex{}, + } +} + +// GetDomain returns a domain from the domains map if it exists +func (d *Disk) GetDomain(host string) (*domain.Domain, error) { + host = strings.ToLower(host) + + d.lock.RLock() + defer d.lock.RUnlock() + + return d.dm[host], nil +} + +// IsReady checks if the domains source is ready for work. The disk source is +// ready after traversing entire filesystem and reading all domains' +// configuration files. +func (d *Disk) IsReady() bool { + return d.dm != nil +} + +// Read starts the domain source, in this case it is reading domains from +// groups on disk concurrently. +func (d *Disk) Read(rootDomain string) { + go Watch(rootDomain, d.updateDomains, time.Second) +} + +func (d *Disk) updateDomains(dm Map) { + d.lock.Lock() + defer d.lock.Unlock() + + d.dm = dm +} diff --git a/internal/source/disk/domain_test.go b/internal/source/disk/domain_test.go new file mode 100644 index 000000000..abffddb4b --- /dev/null +++ b/internal/source/disk/domain_test.go @@ -0,0 +1,507 @@ +package disk + +import ( + "compress/gzip" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + "time" + + "github.com/andybalholm/brotli" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/fixture" + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +func serveFileOrNotFound(domain *domain.Domain) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !domain.ServeFileHTTP(w, r) { + domain.ServeNotFoundHTTP(w, r) + } + } +} + +func testGroupServeHTTPHost(t *testing.T, host string) { + testGroup := &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: map[string]*projectConfig{ + "group.test.io": &projectConfig{}, + "group.gitlab-example.com": &projectConfig{}, + "project": &projectConfig{}, + "project2": &projectConfig{}, + }, + }, + } + + makeURL := func(path string) string { + return "http://" + host + path + } + + serve := serveFileOrNotFound(testGroup) + + require.HTTPBodyContains(t, serve, "GET", makeURL("/"), nil, "main-dir") + require.HTTPBodyContains(t, serve, "GET", makeURL("/index"), nil, "main-dir") + require.HTTPBodyContains(t, serve, "GET", makeURL("/index.html"), nil, "main-dir") + testhelpers.AssertRedirectTo(t, serve, "GET", makeURL("/project"), nil, "//"+host+"/project/") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project/"), nil, "project-subdir") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project/index"), nil, "project-subdir") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project/index/"), nil, "project-subdir") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project/index.html"), nil, "project-subdir") + testhelpers.AssertRedirectTo(t, serve, "GET", makeURL("/project/subdir"), nil, "//"+host+"/project/subdir/") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project/subdir/"), nil, "project-subsubdir") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project2/"), nil, "project2-main") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project2/index"), nil, "project2-main") + require.HTTPBodyContains(t, serve, "GET", makeURL("/project2/index.html"), nil, "project2-main") + require.HTTPError(t, serve, "GET", makeURL("/private.project/"), nil) + require.HTTPError(t, serve, "GET", makeURL("//about.gitlab.com/%2e%2e"), nil) + require.HTTPError(t, serve, "GET", makeURL("/symlink"), nil) + require.HTTPError(t, serve, "GET", makeURL("/symlink/index.html"), nil) + require.HTTPError(t, serve, "GET", makeURL("/symlink/subdir/"), nil) + require.HTTPError(t, serve, "GET", makeURL("/project/fifo"), nil) + require.HTTPError(t, serve, "GET", makeURL("/not-existing-file"), nil) + require.HTTPRedirect(t, serve, "GET", makeURL("/project//about.gitlab.com/%2e%2e"), nil) +} + +func TestGroupServeHTTP(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + t.Run("group.test.io", func(t *testing.T) { testGroupServeHTTPHost(t, "group.test.io") }) + t.Run("group.test.io:8080", func(t *testing.T) { testGroupServeHTTPHost(t, "group.test.io:8080") }) +} + +func TestDomainServeHTTP(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testDomain := &domain.Domain{ + Name: "test.domain.com", + Resolver: &customProjectResolver{ + path: "group/project2/public", + config: &domainConfig{}, + }, + } + + require.HTTPBodyContains(t, serveFileOrNotFound(testDomain), "GET", "/", nil, "project2-main") + require.HTTPBodyContains(t, serveFileOrNotFound(testDomain), "GET", "/index.html", nil, "project2-main") + require.HTTPRedirect(t, serveFileOrNotFound(testDomain), "GET", "/subdir", nil) + require.HTTPBodyContains(t, serveFileOrNotFound(testDomain), "GET", "/subdir", nil, + `Found`) + require.HTTPBodyContains(t, serveFileOrNotFound(testDomain), "GET", "/subdir/", nil, "project2-subdir") + require.HTTPBodyContains(t, serveFileOrNotFound(testDomain), "GET", "/subdir/index.html", nil, "project2-subdir") + require.HTTPError(t, serveFileOrNotFound(testDomain), "GET", "//about.gitlab.com/%2e%2e", nil) + require.HTTPError(t, serveFileOrNotFound(testDomain), "GET", "/not-existing-file", nil) +} + +func TestIsHTTPSOnly(t *testing.T) { + tests := []struct { + name string + domain *domain.Domain + url string + expected bool + }{ + { + name: "Default group domain with HTTPS-only enabled", + domain: &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: projects{"test-domain": &projectConfig{HTTPSOnly: true}}, + }, + }, + url: "http://test-domain", + expected: true, + }, + { + name: "Default group domain with HTTPS-only disabled", + domain: &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: projects{"test-domain": &projectConfig{HTTPSOnly: false}}, + }, + }, + url: "http://test-domain", + expected: false, + }, + { + name: "Case-insensitive default group domain with HTTPS-only enabled", + domain: &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: projects{"test-domain": &projectConfig{HTTPSOnly: true}}, + }, + }, + url: "http://Test-domain", + expected: true, + }, + { + name: "Other group domain with HTTPS-only enabled", + domain: &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: projects{"project": &projectConfig{HTTPSOnly: true}}, + }, + }, + url: "http://test-domain/project", + expected: true, + }, + { + name: "Other group domain with HTTPS-only disabled", + domain: &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: projects{"project": &projectConfig{HTTPSOnly: false}}, + }, + }, + url: "http://test-domain/project", + expected: false, + }, + { + name: "Unknown project", + domain: &domain.Domain{ + Resolver: &Group{}, + }, + url: "http://test-domain/project", + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, _ := http.NewRequest(http.MethodGet, test.url, nil) + require.Equal(t, test.expected, test.domain.IsHTTPSOnly(req)) + }) + } +} + +func testHTTPGzip(t *testing.T, handler http.HandlerFunc, mode, url string, values url.Values, acceptEncoding string, str interface{}, contentType string, expectCompressed bool) { + w := httptest.NewRecorder() + req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) + require.NoError(t, err) + if acceptEncoding != "" { + req.Header.Add("Accept-Encoding", acceptEncoding) + } + handler(w, req) + + if expectCompressed { + contentLength := w.Header().Get("Content-Length") + require.Equal(t, strconv.Itoa(w.Body.Len()), contentLength, "Content-Length") + + contentEncoding := w.Header().Get("Content-Encoding") + require.Equal(t, "gzip", contentEncoding, "Content-Encoding") + + reader, err := gzip.NewReader(w.Body) + require.NoError(t, err) + defer reader.Close() + + bytes, err := ioutil.ReadAll(reader) + require.NoError(t, err) + require.Contains(t, string(bytes), str) + } else { + require.Contains(t, w.Body.String(), str) + } + + require.Equal(t, contentType, w.Header().Get("Content-Type")) +} + +func TestGroupServeHTTPGzip(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testGroup := &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: map[string]*projectConfig{ + "group.test.io": &projectConfig{}, + "group.gitlab-example.com": &projectConfig{}, + "project": &projectConfig{}, + "project2": &projectConfig{}, + }, + }, + } + + testSet := []struct { + mode string // HTTP mode + url string // Test URL + acceptEncoding string // Accept encoding header + body interface{} // Expected body at above URL + contentType string // Expected content-type + expectCompressed bool // Expect the response to be gzipped? + }{ + // No gzip encoding requested + {"GET", "/index.html", "", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "identity", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "gzip; q=0", "main-dir", "text/html; charset=utf-8", false}, + // gzip encoding requested, + {"GET", "/index.html", "identity, gzip", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "gzip", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "gzip; q=1", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "gzip; q=0.9", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "gzip, deflate", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "gzip; q=1, deflate", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "gzip; q=0.9, deflate", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br; q=0.9, gzip; q=1", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br; q=0, gzip; q=1", "main-dir", "text/html; charset=utf-8", true}, + // fallback to gzip because .br is missing + {"GET", "/index2.html", "*", "main-dir", "text/html; charset=utf-8", true}, + // gzip encoding requested, but url does not have compressed content on disk + {"GET", "/project2/index.html", "*", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "identity, gzip", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "gzip", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "gzip; q=1", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "gzip; q=0.9", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "gzip, deflate", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "gzip; q=1, deflate", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "gzip; q=0.9, deflate", "project2-main", "text/html; charset=utf-8", false}, + // malformed headers + {"GET", "/index.html", ";; gzip", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "middle-out", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "gzip; quality=1", "main-dir", "text/html; charset=utf-8", false}, + // Symlinked .gz files are not supported + {"GET", "/gz-symlink", "*", "data", "text/plain; charset=utf-8", false}, + // Unknown file-extension, with text content + {"GET", "/text.unknown", "gzip", "hello", "text/plain; charset=utf-8", true}, + {"GET", "/text-nogzip.unknown", "*", "hello", "text/plain; charset=utf-8", false}, + // Unknown file-extension, with PNG content + {"GET", "/image.unknown", "gzip", "GIF89a", "image/gif", true}, + {"GET", "/image-nogzip.unknown", "*", "GIF89a", "image/gif", false}, + } + + for _, tt := range testSet { + t.Run(tt.url+" acceptEncoding: "+tt.acceptEncoding, func(t *testing.T) { + URL := "http://group.test.io" + tt.url + testHTTPGzip(t, serveFileOrNotFound(testGroup), tt.mode, URL, nil, tt.acceptEncoding, tt.body, tt.contentType, tt.expectCompressed) + }) + } +} + +func testHTTPBrotli(t *testing.T, handler http.HandlerFunc, mode, url string, values url.Values, acceptEncoding string, str interface{}, contentType string, expectCompressed bool) { + w := httptest.NewRecorder() + req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) + require.NoError(t, err) + if acceptEncoding != "" { + req.Header.Add("Accept-Encoding", acceptEncoding) + } + handler(w, req) + + if expectCompressed { + contentLength := w.Header().Get("Content-Length") + require.Equal(t, strconv.Itoa(w.Body.Len()), contentLength, "Content-Length") + + contentEncoding := w.Header().Get("Content-Encoding") + require.Equal(t, "br", contentEncoding, "Content-Encoding") + + reader := brotli.NewReader(w.Body) + bytes, err := ioutil.ReadAll(reader) + require.NoError(t, err) + require.Contains(t, string(bytes), str) + } else { + require.Contains(t, w.Body.String(), str) + } + + require.Equal(t, contentType, w.Header().Get("Content-Type")) +} + +func TestGroupServeHTTPBrotli(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testGroup := &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: map[string]*projectConfig{ + "group.test.io": &projectConfig{}, + "group.gitlab-example.com": &projectConfig{}, + "project": &projectConfig{}, + "project2": &projectConfig{}, + }, + }, + } + + testSet := []struct { + mode string // HTTP mode + url string // Test URL + acceptEncoding string // Accept encoding header + body interface{} // Expected body at above URL + contentType string // Expected content-type + expectCompressed bool // Expect the response to be br compressed? + }{ + // No br encoding requested + {"GET", "/index.html", "", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "identity", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "br; q=0", "main-dir", "text/html; charset=utf-8", false}, + // br encoding requested, + {"GET", "/index.html", "*", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "identity, br", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br; q=1", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br; q=0.9", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br, deflate", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br; q=1, deflate", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "br; q=0.9, deflate", "main-dir", "text/html; charset=utf-8", true}, + {"GET", "/index.html", "gzip; q=0.5, br; q=1", "main-dir", "text/html; charset=utf-8", true}, + // br encoding requested, but url does not have compressed content on disk + {"GET", "/project2/index.html", "*", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "identity, br", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "br", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "br; q=1", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "br; q=0.9", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "br, deflate", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "br; q=1, deflate", "project2-main", "text/html; charset=utf-8", false}, + {"GET", "/project2/index.html", "br; q=0.9, deflate", "project2-main", "text/html; charset=utf-8", false}, + // malformed headers + {"GET", "/index.html", ";; br", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "middle-out", "main-dir", "text/html; charset=utf-8", false}, + {"GET", "/index.html", "br; quality=1", "main-dir", "text/html; charset=utf-8", false}, + // Symlinked .br files are not supported + {"GET", "/gz-symlink", "*", "data", "text/plain; charset=utf-8", false}, + // Unknown file-extension, with text content + {"GET", "/text.unknown", "*", "hello", "text/plain; charset=utf-8", true}, + {"GET", "/text-nogzip.unknown", "*", "hello", "text/plain; charset=utf-8", false}, + // Unknown file-extension, with PNG content + {"GET", "/image.unknown", "*", "GIF89a", "image/gif", true}, + {"GET", "/image-nogzip.unknown", "*", "GIF89a", "image/gif", false}, + } + + for _, tt := range testSet { + t.Run(tt.url+" acceptEncoding: "+tt.acceptEncoding, func(t *testing.T) { + URL := "http://group.test.io" + tt.url + testHTTPBrotli(t, serveFileOrNotFound(testGroup), tt.mode, URL, nil, tt.acceptEncoding, tt.body, tt.contentType, tt.expectCompressed) + }) + } +} + +func TestGroup404ServeHTTP(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testGroup := &domain.Domain{ + Resolver: &Group{ + name: "group.404", + projects: map[string]*projectConfig{ + "domain.404": &projectConfig{}, + "group.404.test.io": &projectConfig{}, + "project.404": &projectConfig{}, + "project.404.symlink": &projectConfig{}, + "project.no.404": &projectConfig{}, + }, + }, + } + + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testGroup), "GET", "http://group.404.test.io/project.404/not/existing-file", nil, "Custom 404 project page") + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testGroup), "GET", "http://group.404.test.io/project.404/", nil, "Custom 404 project page") + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testGroup), "GET", "http://group.404.test.io/not/existing-file", nil, "Custom 404 group page") + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testGroup), "GET", "http://group.404.test.io/not-existing-file", nil, "Custom 404 group page") + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testGroup), "GET", "http://group.404.test.io/", nil, "Custom 404 group page") + require.HTTPBodyNotContains(t, serveFileOrNotFound(testGroup), "GET", "http://group.404.test.io/project.404.symlink/not/existing-file", nil, "Custom 404 project page") + + // Ensure the namespace project's custom 404.html is not used by projects + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testGroup), "GET", "http://group.404.test.io/project.no.404/not/existing-file", nil, "The page you're looking for could not be found.") +} + +func TestDomain404ServeHTTP(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testDomain := &domain.Domain{ + Resolver: &customProjectResolver{ + path: "group.404/domain.404/public/", + config: &domainConfig{Domain: "domain.404.com"}, + }, + } + + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testDomain), "GET", "http://group.404.test.io/not-existing-file", nil, "Custom domain.404 page") + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testDomain), "GET", "http://group.404.test.io/", nil, "Custom domain.404 page") +} + +func TestPredefined404ServeHTTP(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testDomain := domain.New("", "", "", &customProjectResolver{}) + + testhelpers.AssertHTTP404(t, serveFileOrNotFound(testDomain), "GET", "http://group.test.io/not-existing-file", nil, "The page you're looking for could not be found") +} + +func TestGroupCertificate(t *testing.T) { + testGroup := &domain.Domain{} + + tls, err := testGroup.EnsureCertificate() + require.Nil(t, tls) + require.Error(t, err) +} + +func TestDomainNoCertificate(t *testing.T) { + testDomain := &domain.Domain{ + Resolver: &customProjectResolver{ + path: "group/project2/public", + config: &domainConfig{Domain: "test.domain.com"}, + }, + } + + tls, err := testDomain.EnsureCertificate() + require.Nil(t, tls) + require.Error(t, err) + + _, err2 := testDomain.EnsureCertificate() + require.Error(t, err) + require.Equal(t, err, err2) +} + +func TestDomainCertificate(t *testing.T) { + testDomain := &domain.Domain{ + Name: "test.domain.com", + CertificateCert: fixture.Certificate, + CertificateKey: fixture.Key, + Resolver: &customProjectResolver{ + path: "group/project2/public", + }, + } + + tls, err := testDomain.EnsureCertificate() + require.NotNil(t, tls) + require.NoError(t, err) +} + +func TestCacheControlHeaders(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + testGroup := &domain.Domain{ + Resolver: &Group{ + name: "group", + projects: map[string]*projectConfig{ + "group.test.io": &projectConfig{}, + }, + }, + } + w := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://group.test.io/", nil) + require.NoError(t, err) + + now := time.Now() + serveFileOrNotFound(testGroup)(w, req) + + require.Equal(t, http.StatusOK, w.Code) + require.Equal(t, "max-age=600", w.Header().Get("Cache-Control")) + + expires := w.Header().Get("Expires") + require.NotEmpty(t, expires) + + expiresTime, err := time.Parse(time.RFC1123, expires) + require.NoError(t, err) + + require.WithinDuration(t, now.UTC().Add(10*time.Minute), expiresTime.UTC(), time.Minute) +} + +var chdirSet = false + +func setUpTests(t *testing.T) func() { + t.Helper() + return testhelpers.ChdirInPath(t, "../../../shared/pages", &chdirSet) +} diff --git a/internal/source/disk/group.go b/internal/source/disk/group.go new file mode 100644 index 000000000..9499df466 --- /dev/null +++ b/internal/source/disk/group.go @@ -0,0 +1,104 @@ +package disk + +import ( + "net/http" + "path" + "path/filepath" + "strings" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/host" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/local" +) + +const ( + subgroupScanLimit int = 21 + // maxProjectDepth is set to the maximum nested project depth in gitlab (21) plus 3. + // One for the project, one for the first empty element of the split (URL.Path starts with /), + // and one for the real file path + maxProjectDepth int = subgroupScanLimit + 3 +) + +// Group represents a GitLab group with project configs and subgroups +type Group struct { + name string + + // nested groups + subgroups subgroups + + // group domains: + projects projects +} + +type projects map[string]*projectConfig +type subgroups map[string]*Group + +func (g *Group) digProjectWithSubpath(parentPath string, keys []string) (*projectConfig, string, string) { + if len(keys) >= 1 { + head := keys[0] + tail := keys[1:] + currentPath := path.Join(parentPath, head) + search := strings.ToLower(head) + + if project := g.projects[search]; project != nil { + return project, currentPath, path.Join(tail...) + } + + if subgroup := g.subgroups[search]; subgroup != nil { + return subgroup.digProjectWithSubpath(currentPath, tail) + } + } + + return nil, "", "" +} + +// Look up a project inside the domain based on the host and path. Returns the +// project and its name (if applicable) +func (g *Group) getProjectConfigWithSubpath(r *http.Request) (*projectConfig, string, string, string) { + // Check for a project specified in the URL: http://group.gitlab.io/projectA + // If present, these projects shadow the group domain. + split := strings.SplitN(r.URL.Path, "/", maxProjectDepth) + if len(split) >= 2 { + projectConfig, projectPath, urlPath := g.digProjectWithSubpath("", split[1:]) + if projectConfig != nil { + return projectConfig, "/" + projectPath, projectPath, urlPath + } + } + + // Since the URL doesn't specify a project (e.g. http://mydomain.gitlab.io), + // return the group project if it exists. + if host := host.FromRequest(r); host != "" { + if groupProject := g.projects[host]; groupProject != nil { + return groupProject, "/", host, strings.Join(split[1:], "/") + } + } + + return nil, "", "", "" +} + +// Resolve tries to find project and its config recursively for a given request +// to a group domain +func (g *Group) Resolve(r *http.Request) (*serving.Request, error) { + projectConfig, prefix, projectPath, subPath := g.getProjectConfigWithSubpath(r) + + if projectConfig == nil { + return nil, domain.ErrDomainDoesNotExist + } + + lookupPath := &serving.LookupPath{ + ServingType: "file", + Prefix: prefix, + Path: filepath.Join(g.name, projectPath, "public") + "/", + IsNamespaceProject: projectConfig.NamespaceProject, + IsHTTPSOnly: projectConfig.HTTPSOnly, + HasAccessControl: projectConfig.AccessControl, + ProjectID: projectConfig.ID, + } + + return &serving.Request{ + Serving: local.Instance(), + LookupPath: lookupPath, + SubPath: subPath, + }, nil +} diff --git a/internal/source/disk/group_test.go b/internal/source/disk/group_test.go new file mode 100644 index 000000000..d0fb49bd9 --- /dev/null +++ b/internal/source/disk/group_test.go @@ -0,0 +1,97 @@ +package disk + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGroupDig(t *testing.T) { + matchingProject := &projectConfig{ID: 1} + + tests := []struct { + name string + g Group + path string + expectedProject *projectConfig + expectedProjectPath string + expectedPath string + }{ + { + name: "empty group", + path: "projectb/demo/features.html", + g: Group{}, + }, + { + name: "group with project", + path: "projectb/demo/features.html", + g: Group{ + projects: projects{"projectb": matchingProject}, + }, + expectedProject: matchingProject, + expectedProjectPath: "projectb", + expectedPath: "demo/features.html", + }, + { + name: "group with project and no path in URL", + path: "projectb", + g: Group{ + projects: projects{"projectb": matchingProject}, + }, + expectedProject: matchingProject, + expectedProjectPath: "projectb", + }, + { + name: "group with subgroup and project", + path: "projectb/demo/features.html", + g: Group{ + projects: projects{"projectb": matchingProject}, + subgroups: subgroups{ + "sub1": &Group{ + projects: projects{"another": &projectConfig{}}, + }, + }, + }, + expectedProject: matchingProject, + expectedProjectPath: "projectb", + expectedPath: "demo/features.html", + }, + { + name: "group with project inside a subgroup", + path: "sub1/projectb/demo/features.html", + g: Group{ + subgroups: subgroups{ + "sub1": &Group{ + projects: projects{"projectb": matchingProject}, + }, + }, + projects: projects{"another": &projectConfig{}}, + }, + expectedProject: matchingProject, + expectedProjectPath: "sub1/projectb", + expectedPath: "demo/features.html", + }, + { + name: "group with matching subgroup but no project", + path: "sub1/projectb/demo/features.html", + g: Group{ + subgroups: subgroups{ + "sub1": &Group{ + projects: projects{"another": &projectConfig{}}, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + project, projectPath, urlPath := test.g.digProjectWithSubpath("", strings.Split(test.path, "/")) + + require.Equal(t, test.expectedProject, project) + require.Equal(t, test.expectedProjectPath, projectPath) + require.Equal(t, test.expectedPath, urlPath) + }) + } +} diff --git a/internal/source/disk/map.go b/internal/source/disk/map.go new file mode 100644 index 000000000..0413d409f --- /dev/null +++ b/internal/source/disk/map.go @@ -0,0 +1,307 @@ +package disk + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/karrick/godirwalk" + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// preventive measure to skip `@hashed` dir for new zip deployments when sourcing config from disk +// https://gitlab.com/gitlab-org/gitlab-pages/-/issues/468 +const skipHashedDir = "@hashed" + +// Map maps domain names to Domain instances. +type Map map[string]*domain.Domain + +type domainsUpdater func(Map) + +func (dm Map) updateDomainMap(domainName string, domain *domain.Domain) { + if _, ok := dm[domainName]; ok { + log.WithFields(log.Fields{ + "domain_name": domainName, + }).Error("Duplicate domain") + } + + dm[domainName] = domain +} + +func (dm Map) addDomain(rootDomain, groupName, projectName string, config *domainConfig) { + newDomain := domain.New( + strings.ToLower(config.Domain), + config.Certificate, + config.Key, + &customProjectResolver{ + config: config, + path: filepath.Join(groupName, projectName, "public"), + }, + ) + + dm.updateDomainMap(newDomain.Name, newDomain) +} + +func (dm Map) updateGroupDomain(rootDomain, groupName, projectPath string, httpsOnly bool, accessControl bool, id uint64) { + domainName := strings.ToLower(groupName + "." + rootDomain) + groupDomain := dm[domainName] + + if groupDomain == nil { + groupResolver := &Group{ + name: groupName, + projects: make(projects), + subgroups: make(subgroups), + } + + groupDomain = domain.New(domainName, "", "", groupResolver) + } + + split := strings.SplitN(strings.ToLower(projectPath), "/", maxProjectDepth) + projectName := split[len(split)-1] + g := groupDomain.Resolver.(*Group) + + for i := 0; i < len(split)-1; i++ { + subgroupName := split[i] + subgroup := g.subgroups[subgroupName] + if subgroup == nil { + subgroup = &Group{ + name: subgroupName, + projects: make(projects), + subgroups: make(subgroups), + } + g.subgroups[subgroupName] = subgroup + } + + g = subgroup + } + + g.projects[projectName] = &projectConfig{ + NamespaceProject: domainName == projectName, + HTTPSOnly: httpsOnly, + AccessControl: accessControl, + ID: id, + } + + dm[domainName] = groupDomain +} + +func (dm Map) readProjectConfig(rootDomain string, group, projectName string, config *multiDomainConfig) { + if config == nil { + // This is necessary to preserve the previous behaviour where a + // group domain is created even if no config.json files are + // loaded successfully. Is it safe to remove this? + dm.updateGroupDomain(rootDomain, group, projectName, false, false, 0) + return + } + + dm.updateGroupDomain(rootDomain, group, projectName, config.HTTPSOnly, config.AccessControl, config.ID) + + for _, domainConfig := range config.Domains { + config := domainConfig // domainConfig is reused for each loop iteration + if domainConfig.Valid(rootDomain) { + dm.addDomain(rootDomain, group, projectName, &config) + } + } +} + +func readProject(group, parent, projectName string, level int, fanIn chan<- jobResult) { + if strings.HasPrefix(projectName, ".") { + return + } + + // Ignore projects that have .deleted in name + if strings.HasSuffix(projectName, ".deleted") { + return + } + + projectPath := filepath.Join(parent, projectName) + if _, err := os.Lstat(filepath.Join(group, projectPath, "public")); err != nil { + // maybe it's a subgroup + if level <= subgroupScanLimit { + buf := make([]byte, 2*os.Getpagesize()) + readProjects(group, projectPath, level+1, buf, fanIn) + } + + return + } + + // We read the config.json file _before_ fanning in, because it does disk + // IO and it does not need access to the domains map. + config := &multiDomainConfig{} + if err := config.Read(group, projectPath); err != nil { + config = nil + } + + fanIn <- jobResult{group: group, project: projectPath, config: config} +} + +func readProjects(group, parent string, level int, buf []byte, fanIn chan<- jobResult) { + subgroup := filepath.Join(group, parent) + fis, err := godirwalk.ReadDirents(subgroup, buf) + if err != nil { + log.WithError(err).WithFields(log.Fields{ + "group": group, + "parent": parent, + }).Print("readdir failed") + return + } + + for _, project := range fis { + // Ignore non directories + if !project.IsDir() { + continue + } + + readProject(group, parent, project.Name(), level, fanIn) + } +} + +type jobResult struct { + group string + project string + config *multiDomainConfig +} + +// ReadGroups walks the pages directory and populates dm with all the domains it finds. +func (dm Map) ReadGroups(rootDomain string, fis godirwalk.Dirents) { + fanOutGroups := make(chan string) + fanIn := make(chan jobResult) + wg := &sync.WaitGroup{} + for i := 0; i < 4; i++ { + wg.Add(1) + + go func() { + buf := make([]byte, 2*os.Getpagesize()) + + for group := range fanOutGroups { + if group == skipHashedDir { + continue + } + + started := time.Now() + + readProjects(group, "", 0, buf, fanIn) + + log.WithFields(log.Fields{ + "group": group, + "duration": time.Since(started).Seconds(), + }).Debug("Loaded projects for group") + } + + wg.Done() + }() + } + + go func() { + wg.Wait() + close(fanIn) + }() + + done := make(chan struct{}) + go func() { + for result := range fanIn { + dm.readProjectConfig(rootDomain, result.group, result.project, result.config) + } + + close(done) + }() + + for _, group := range fis { + if !group.IsDir() { + continue + } + if strings.HasPrefix(group.Name(), ".") { + continue + } + fanOutGroups <- group.Name() + } + close(fanOutGroups) + + <-done +} + +const ( + updateFile = ".update" +) + +// Watch polls the filesystem and kicks off a new domain directory scan when needed. +func Watch(rootDomain string, updater domainsUpdater, interval time.Duration) { + lastUpdate := []byte("no-update") + + for { + // Read the update file + update, err := ioutil.ReadFile(updateFile) + if err != nil && !os.IsNotExist(err) { + log.WithError(err).Print("failed to read update timestamp") + time.Sleep(interval) + continue + } + + // If it's the same ignore + if bytes.Equal(lastUpdate, update) { + time.Sleep(interval) + continue + } + lastUpdate = update + + started := time.Now() + dm := make(Map) + + fis, err := godirwalk.ReadDirents(".", nil) + if err != nil { + log.WithError(err).Warn("domain scan failed") + metrics.DomainFailedUpdates.Inc() + continue + } + + dm.ReadGroups(rootDomain, fis) + duration := time.Since(started).Seconds() + + var hash string + if len(update) < 1 { + hash = "" + } else { + hash = strings.TrimSpace(string(update)) + } + + logConfiguredDomains(dm) + + log.WithFields(log.Fields{ + "count(domains)": len(dm), + "duration": duration, + "hash": hash, + }).Info("Updated all domains") + + if updater != nil { + updater(dm) + } + + // Update prometheus metrics + metrics.DomainLastUpdateTime.Set(float64(time.Now().UTC().Unix())) + metrics.DomainsServed.Set(float64(len(dm))) + metrics.DomainsConfigurationUpdateDuration.Set(duration) + metrics.DomainUpdates.Inc() + + time.Sleep(interval) + } +} + +func logConfiguredDomains(dm Map) { + if log.GetLevel() != log.DebugLevel { + return + } + + for h, d := range dm { + log.WithFields(log.Fields{ + "domain": d, + "host": h, + }).Debug("Configured domain") + } +} diff --git a/internal/source/disk/map_test.go b/internal/source/disk/map_test.go new file mode 100644 index 000000000..2a5fd8288 --- /dev/null +++ b/internal/source/disk/map_test.go @@ -0,0 +1,253 @@ +package disk + +import ( + "crypto/rand" + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/karrick/godirwalk" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +func getEntries(t require.TestingT) godirwalk.Dirents { + fis, err := godirwalk.ReadDirents(".", nil) + + require.NoError(t, err) + + return fis +} + +func TestReadProjects(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + dm := make(Map) + dm.ReadGroups("test.io", getEntries(t)) + + var domains []string + for d := range dm { + domains = append(domains, d) + } + + expectedDomains := []string{ + "group.test.io", + "group.internal.test.io", + "test.domain.com", // from config.json + "other.domain.com", + "domain.404.com", + "group.404.test.io", + "group.https-only.test.io", + "test.my-domain.com", + "test2.my-domain.com", + "no.cert.com", + "private.domain.com", + "group.auth.test.io", + "group.acme.test.io", + "withacmechallenge.domain.com", + "capitalgroup.test.io", + "group.404.gitlab-example.com", + "group.redirects.test.io", + "redirects.custom-domain.com", + } + + for _, expected := range domains { + require.Contains(t, domains, expected) + } + + for _, actual := range domains { + require.Contains(t, expectedDomains, actual) + } + + // Check that multiple domains in the same project are recorded faithfully + require.Equal(t, "test.domain.com", dm["test.domain.com"].Name) + require.Equal(t, "other.domain.com", dm["other.domain.com"].Name) + require.Equal(t, "test", dm["other.domain.com"].CertificateCert) + require.Equal(t, "key", dm["other.domain.com"].CertificateKey) + + // check subgroups + domain, ok := dm["group.test.io"] + require.True(t, ok, "missing group.test.io domain") + subgroup, ok := domain.Resolver.(*Group).subgroups["subgroup"] + require.True(t, ok, "missing group.test.io subgroup") + _, ok = subgroup.projects["project"] + require.True(t, ok, "missing project for subgroup in group.test.io domain") +} + +func TestReadProjectsMaxDepth(t *testing.T) { + nGroups := 3 + levels := subgroupScanLimit + 5 + cleanup := buildFakeDomainsDirectory(t, nGroups, levels) + defer cleanup() + + defaultDomain := "test.io" + dm := make(Map) + dm.ReadGroups(defaultDomain, getEntries(t)) + + var domains []string + for d := range dm { + domains = append(domains, d) + } + + var expectedDomains []string + for i := 0; i < nGroups; i++ { + expectedDomains = append(expectedDomains, fmt.Sprintf("group-%d.%s", i, defaultDomain)) + } + + for _, expected := range domains { + require.Contains(t, domains, expected) + } + + for _, actual := range domains { + // we are not checking config.json domains here + if !strings.HasSuffix(actual, defaultDomain) { + continue + } + require.Contains(t, expectedDomains, actual) + } + + // check subgroups + domain, ok := dm["group-0.test.io"] + require.True(t, ok, "missing group-0.test.io domain") + subgroup := domain.Resolver.(*Group) + for i := 0; i < levels; i++ { + subgroup, ok = subgroup.subgroups["sub"] + if i <= subgroupScanLimit { + require.True(t, ok, "missing group-0.test.io subgroup at level %d", i) + _, ok = subgroup.projects["project-0"] + require.True(t, ok, "missing project for subgroup in group-0.test.io domain at level %d", i) + } else { + require.False(t, ok, "subgroup level %d. Maximum allowed nesting level is %d", i, subgroupScanLimit) + break + } + } +} + +// This write must be atomic, otherwise we cannot predict the state of the +// domain watcher goroutine. We cannot use ioutil.WriteFile because that +// has a race condition where the file is empty, which can get picked up +// by the domain watcher. +func writeRandomTimestamp(t *testing.T) { + b := make([]byte, 10) + n, _ := rand.Read(b) + require.True(t, n > 0, "read some random bytes") + + temp, err := ioutil.TempFile(".", "TestWatch") + require.NoError(t, err) + _, err = temp.Write(b) + require.NoError(t, err, "write to tempfile") + require.NoError(t, temp.Close(), "close tempfile") + + require.NoError(t, os.Rename(temp.Name(), updateFile), "rename tempfile") +} + +func TestWatch(t *testing.T) { + cleanup := setUpTests(t) + defer cleanup() + + require.NoError(t, os.RemoveAll(updateFile)) + + update := make(chan Map) + go Watch("gitlab.io", func(dm Map) { + update <- dm + }, time.Microsecond*50) + + defer os.Remove(updateFile) + + domains := recvTimeout(t, update) + require.NotNil(t, domains, "if the domains are fetched on start") + + writeRandomTimestamp(t) + domains = recvTimeout(t, update) + require.NotNil(t, domains, "if the domains are updated after the creation") + + writeRandomTimestamp(t) + domains = recvTimeout(t, update) + require.NotNil(t, domains, "if the domains are updated after the timestamp change") +} + +func recvTimeout(t *testing.T, ch <-chan Map) Map { + timeout := 5 * time.Second + + select { + case dm := <-ch: + return dm + case <-time.After(timeout): + t.Fatalf("timeout after %v waiting for domain update", timeout) + return nil + } +} + +func buildFakeDomainsDirectory(t testing.TB, nGroups, levels int) func() { + testRoot, err := ioutil.TempDir("", "gitlab-pages-test") + require.NoError(t, err) + + for i := 0; i < nGroups; i++ { + parent := fmt.Sprintf("%s/group-%d", testRoot, i) + domain := fmt.Sprintf("%d.example.io", i) + buildFakeProjectsDirectory(t, parent, domain) + for j := 0; j < levels; j++ { + parent = fmt.Sprintf("%s/sub", parent) + domain = fmt.Sprintf("%d.%s", j, domain) + buildFakeProjectsDirectory(t, parent, domain) + } + if testing.Verbose() && i%100 == 0 { + fmt.Print(".") + } + } + + cleanup := testhelpers.ChdirInPath(t, testRoot, &chdirSet) + + return func() { + defer cleanup() + + if testing.Verbose() { + fmt.Printf("cleaning up test directory %s\n", testRoot) + } + + os.RemoveAll(testRoot) + } +} + +func buildFakeProjectsDirectory(t require.TestingT, groupPath, domain string) { + for j := 0; j < 5; j++ { + dir := fmt.Sprintf("%s/project-%d", groupPath, j) + require.NoError(t, os.MkdirAll(dir+"/public", 0755)) + + fakeConfig := fmt.Sprintf(`{"Domains":[{"Domain":"foo.%d.%s","Certificate":"bar","Key":"baz"}]}`, j, domain) + require.NoError(t, ioutil.WriteFile(dir+"/config.json", []byte(fakeConfig), 0644)) + } +} + +// this is a safeguard against compiler optimizations +// we use this package variable to make sure the benchmarkReadGroups loop +// has side effects outside of the loop. +// Without this the compiler (with the optimizations enabled) may remove the whole loop +var result int + +func benchmarkReadGroups(b *testing.B, groups, levels int) { + cleanup := buildFakeDomainsDirectory(b, groups, levels) + defer cleanup() + + b.ResetTimer() + + domainsCnt := 0 + for i := 0; i < b.N; i++ { + dm := make(Map) + dm.ReadGroups("example.com", getEntries(b)) + domainsCnt = len(dm) + } + result = domainsCnt +} + +func BenchmarkReadGroups(b *testing.B) { + b.Run("10 groups 3 levels", func(b *testing.B) { benchmarkReadGroups(b, 10, 3) }) + b.Run("100 groups 3 levels", func(b *testing.B) { benchmarkReadGroups(b, 100, 3) }) + b.Run("1000 groups 3 levels", func(b *testing.B) { benchmarkReadGroups(b, 1000, 3) }) + b.Run("10000 groups 1 levels", func(b *testing.B) { benchmarkReadGroups(b, 10000, 1) }) +} diff --git a/internal/source/domains.go b/internal/source/domains.go new file mode 100644 index 000000000..cf81fab2d --- /dev/null +++ b/internal/source/domains.go @@ -0,0 +1,163 @@ +package source + +import ( + "fmt" + "regexp" + + "gitlab.com/gitlab-org/labkit/log" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/disk" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab" +) + +var ( + // serverlessDomainRegex is a regular expression we use to check if a domain + // is a serverless domain, to short circuit gitlab source rollout. It can be + // removed after the rollout is done + serverlessDomainRegex = regexp.MustCompile(`^[^.]+-[[:xdigit:]]{2}a1[[:xdigit:]]{10}f2[[:xdigit:]]{2}[[:xdigit:]]+-?.*`) +) + +type configSource int + +const ( + sourceGitlab configSource = iota + sourceDisk + sourceAuto +) + +// Domains struct represents a map of all domains supported by pages. It is +// currently using two sources during the transition to the new GitLab domains +// source. +type Domains struct { + configSource configSource + gitlab Source + disk *disk.Disk // legacy disk source +} + +// NewDomains is a factory method for domains initializing a mutex. It should +// not initialize `dm` as we later check the readiness by comparing it with a +// nil value. +func NewDomains(config Config) (*Domains, error) { + domains := &Domains{} + if err := domains.setConfigSource(config); err != nil { + return nil, err + } + + return domains, nil +} + +// setConfigSource and initialize gitlab source +// returns error if -domain-config-source is not valid +// returns error if -domain-config-source=gitlab and init fails +func (d *Domains) setConfigSource(config Config) error { + switch config.DomainConfigSource() { + case "gitlab": + d.configSource = sourceGitlab + return d.setGitLabClient(config) + case "auto": + d.configSource = sourceAuto + // enable disk for auto for now + d.disk = disk.New() + return d.setGitLabClient(config) + case "disk": + // TODO: disable domains.disk https://gitlab.com/gitlab-org/gitlab-pages/-/issues/382 + d.configSource = sourceDisk + d.disk = disk.New() + default: + return fmt.Errorf("invalid option for -domain-config-source: %q", config.DomainConfigSource()) + } + + return nil +} + +// setGitLabClient when domain-config-source is `gitlab` or `auto`, only return error for `gitlab` source +func (d *Domains) setGitLabClient(config Config) error { + // We want to notify users about any API issues + // Creating a glClient will start polling connectivity in the background + // and spam errors in log + glClient, err := gitlab.New(config) + if err != nil { + if d.configSource == sourceGitlab { + return err + } + + log.WithError(err).Warn("failed to initialize GitLab client for `-domain-config-source=auto`") + + return nil + } + + d.gitlab = glClient + + return nil +} + +// GetDomain retrieves a domain information from a source. We are using two +// sources here because it allows us to switch behavior and the domain source +// for some subset of domains, to test / PoC the new GitLab Domains Source that +// we plan to use to replace the disk source. +func (d *Domains) GetDomain(name string) (*domain.Domain, error) { + return d.source(name).GetDomain(name) +} + +// Read starts the disk domain source. It is DEPRECATED, because we want to +// remove it entirely when disk source gets removed. +func (d *Domains) Read(rootDomain string) { + // start disk.Read for sourceDisk and sourceAuto + if d.configSource != sourceGitlab { + d.disk.Read(rootDomain) + } +} + +// IsReady checks if the disk domain source managed to traverse entire pages +// filesystem and is ready for use. It is DEPRECATED, because we want to remove +// it entirely when disk source gets removed. +func (d *Domains) IsReady() bool { + switch d.configSource { + case sourceGitlab: + return d.gitlab.IsReady() + case sourceDisk: + return d.disk.IsReady() + case sourceAuto: + // if gitlab is configured and is ready + if d.gitlab != nil && d.gitlab.IsReady() { + return true + } + + return d.disk.IsReady() + default: + return false + } +} + +func (d *Domains) source(domain string) Source { + // This check is only needed until we enable `d.gitlab` source in all + // environments (including on-premises installations) followed by removal of + // `d.disk` source. This can be safely removed afterwards. + if IsServerlessDomain(domain) { + return d.gitlab + } + + switch d.configSource { + case sourceDisk: + return d.disk + case sourceGitlab: + return d.gitlab + default: + if d.gitlab != nil && d.gitlab.IsReady() { + return d.gitlab + } + + return d.disk + } +} + +// IsServerlessDomain checks if a domain requested is a serverless domain we +// need to handle differently. +// +// Domain is a serverless domain when it matches `serverlessDomainRegex`. The +// regular expression is also defined on the gitlab-rails side, see +// https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/models/serverless/domain.rb#L7 +func IsServerlessDomain(domain string) bool { + return serverlessDomainRegex.MatchString(domain) +} diff --git a/internal/source/domains_test.go b/internal/source/domains_test.go new file mode 100644 index 000000000..abc82e423 --- /dev/null +++ b/internal/source/domains_test.go @@ -0,0 +1,195 @@ +package source + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/disk" +) + +type sourceConfig struct { + api string + secret string + domainSource string +} + +func (c sourceConfig) InternalGitLabServerURL() string { + return c.api +} + +func (c sourceConfig) GitlabAPISecret() []byte { + return []byte(c.secret) +} +func (c sourceConfig) GitlabClientConnectionTimeout() time.Duration { + return 10 * time.Second +} + +func (c sourceConfig) GitlabJWTTokenExpiry() time.Duration { + return 30 * time.Second +} + +func (c sourceConfig) DomainConfigSource() string { + return c.domainSource +} + +func TestNewDomains(t *testing.T) { + tests := []struct { + name string + sourceConfig sourceConfig + expectedErr string + expectGitlabNil bool + expectDiskNil bool + }{ + { + name: "no_source_config", + sourceConfig: sourceConfig{}, + expectedErr: "invalid option for -domain-config-source: \"\"", + }, + { + name: "invalid_source_config", + sourceConfig: sourceConfig{domainSource: "invalid"}, + expectedErr: "invalid option for -domain-config-source: \"invalid\"", + }, + { + name: "disk_source", + sourceConfig: sourceConfig{domainSource: "disk"}, + expectGitlabNil: true, + expectDiskNil: false, + }, + { + name: "auto_without_api_config", + sourceConfig: sourceConfig{domainSource: "auto"}, + expectGitlabNil: true, + expectDiskNil: false, + }, + { + name: "auto_with_api_config", + sourceConfig: sourceConfig{api: "https://gitlab.com", secret: "abc", domainSource: "auto"}, + expectGitlabNil: false, + expectDiskNil: false, + }, + { + name: "gitlab_source_success", + sourceConfig: sourceConfig{api: "https://gitlab.com", secret: "abc", domainSource: "gitlab"}, + expectDiskNil: true, + }, + { + name: "gitlab_source_no_url", + sourceConfig: sourceConfig{api: "", secret: "abc", domainSource: "gitlab"}, + expectedErr: "GitLab API URL or API secret has not been provided", + }, + { + name: "gitlab_source_no_secret", + sourceConfig: sourceConfig{api: "https://gitlab.com", secret: "", domainSource: "gitlab"}, + expectedErr: "GitLab API URL or API secret has not been provided", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + domains, err := NewDomains(tt.sourceConfig) + if tt.expectedErr != "" { + require.EqualError(t, err, tt.expectedErr) + return + } + require.NoError(t, err) + + require.Equal(t, tt.expectGitlabNil, domains.gitlab == nil) + require.Equal(t, tt.expectDiskNil, domains.disk == nil) + }) + } +} + +func TestGetDomain(t *testing.T) { + t.Run("when requesting an existing domain for gitlab source", func(t *testing.T) { + testDomain := "new-source-test.gitlab.io" + + newSource := NewMockSource() + newSource.On("GetDomain", testDomain). + Return(&domain.Domain{Name: testDomain}, nil). + Once() + defer newSource.AssertExpectations(t) + + domains := newTestDomains(t, newSource, sourceGitlab) + + domain, err := domains.GetDomain(testDomain) + require.NoError(t, err) + require.NotNil(t, domain) + }) + + t.Run("when requesting an existing domain for auto source", func(t *testing.T) { + testDomain := "new-source-test.gitlab.io" + + newSource := NewMockSource() + newSource.On("GetDomain", testDomain). + Return(&domain.Domain{Name: testDomain}, nil). + Once() + newSource.On("IsReady").Return(true).Once() + defer newSource.AssertExpectations(t) + + domains := newTestDomains(t, newSource, sourceAuto) + + domain, err := domains.GetDomain(testDomain) + require.NoError(t, err) + require.NotNil(t, domain) + }) + + t.Run("when requesting a domain that doesn't exist for gitlab source", func(t *testing.T) { + newSource := NewMockSource() + newSource.On("GetDomain", "does-not-exist.test.io"). + Return(nil, nil). + Once() + + defer newSource.AssertExpectations(t) + + domains := newTestDomains(t, newSource, sourceGitlab) + + domain, err := domains.GetDomain("does-not-exist.test.io") + require.NoError(t, err) + require.Nil(t, domain) + }) + + t.Run("when requesting a serverless domain", func(t *testing.T) { + testDomain := "func-aba1aabbccddeef2abaabbcc.serverless.gitlab.io" + + newSource := NewMockSource() + newSource.On("GetDomain", testDomain). + Return(&domain.Domain{Name: testDomain}, nil). + Once() + + defer newSource.AssertExpectations(t) + + domains := newTestDomains(t, newSource, sourceGitlab) + + domain, err := domains.GetDomain(testDomain) + require.NoError(t, err) + require.NotNil(t, domain) + }) +} + +func TestIsServerlessDomain(t *testing.T) { + t.Run("when a domain is serverless domain", func(t *testing.T) { + require.True(t, IsServerlessDomain("some-function-aba1aabbccddeef2abaabbcc.serverless.gitlab.io")) + }) + + t.Run("when a domain is serverless domain with environment", func(t *testing.T) { + require.True(t, IsServerlessDomain("some-function-aba1aabbccddeef2abaabbcc-testing.serverless.gitlab.io")) + }) + + t.Run("when a domain is not a serverless domain", func(t *testing.T) { + require.False(t, IsServerlessDomain("somedomain.gitlab.io")) + }) +} + +func newTestDomains(t *testing.T, gitlabSource *MockSource, config configSource) *Domains { + t.Helper() + + return &Domains{ + configSource: config, + gitlab: gitlabSource, + disk: disk.New(), + } +} diff --git a/internal/source/gitlab/api/client.go b/internal/source/gitlab/api/client.go new file mode 100644 index 000000000..181c580b4 --- /dev/null +++ b/internal/source/gitlab/api/client.go @@ -0,0 +1,14 @@ +package api + +import ( + "context" +) + +// Client represents an interface we use to retrieve information from GitLab +type Client interface { + // Resolve retrieves an VirtualDomain from the GitLab API and wraps it into a Lookup + GetLookup(ctx context.Context, domain string) Lookup + + // Status checks the connectivity with the GitLab API + Status() error +} diff --git a/internal/source/gitlab/api/lookup.go b/internal/source/gitlab/api/lookup.go new file mode 100644 index 000000000..73a3ce433 --- /dev/null +++ b/internal/source/gitlab/api/lookup.go @@ -0,0 +1,8 @@ +package api + +// Lookup defines an API lookup action with a response that GitLab sends +type Lookup struct { + Name string + Error error + Domain *VirtualDomain +} diff --git a/internal/source/gitlab/api/lookup_path.go b/internal/source/gitlab/api/lookup_path.go new file mode 100644 index 000000000..77b264ff1 --- /dev/null +++ b/internal/source/gitlab/api/lookup_path.go @@ -0,0 +1,32 @@ +package api + +// LookupPath represents a lookup path for a virtual domain +type LookupPath struct { + ProjectID int `json:"project_id,omitempty"` + AccessControl bool `json:"access_control,omitempty"` + HTTPSOnly bool `json:"https_only,omitempty"` + Prefix string `json:"prefix,omitempty"` + Source Source `json:"source,omitempty"` +} + +// Source describes GitLab Page serving variant +type Source struct { + Type string `json:"type,omitempty"` + Path string `json:"path,omitempty"` + Serverless Serverless `json:"serverless,omitempty"` +} + +// Serverless describes serverless serving configuration +type Serverless struct { + Service string `json:"service,omitempty"` + Cluster Cluster `json:"cluster,omitempty"` +} + +// Cluster describes serverless cluster configuration +type Cluster struct { + Address string `json:"address,omitempty"` + Port string `json:"port,omitempty"` + Hostname string `json:"hostname,omitempty"` + CertificateCert string `json:"cert,omitempty"` + CertificateKey string `json:"key,omitempty"` +} diff --git a/internal/source/gitlab/api/resolver.go b/internal/source/gitlab/api/resolver.go new file mode 100644 index 000000000..738278e2a --- /dev/null +++ b/internal/source/gitlab/api/resolver.go @@ -0,0 +1,15 @@ +package api + +import ( + "context" +) + +// Resolver represents an interface we use to retrieve information from GitLab +// in a more generic way. It can be a concrete API client or cached client. +type Resolver interface { + // Resolve retrieves an VirtualDomain from the GitLab API and wraps it into a Lookup + Resolve(ctx context.Context, domain string) *Lookup + + // Status checks the connectivity with the GitLab API + Status() error +} diff --git a/internal/source/gitlab/api/virtual_domain.go b/internal/source/gitlab/api/virtual_domain.go new file mode 100644 index 000000000..200c06de8 --- /dev/null +++ b/internal/source/gitlab/api/virtual_domain.go @@ -0,0 +1,10 @@ +package api + +// VirtualDomain represents a GitLab Pages virtual domain that is being sent +// from GitLab API +type VirtualDomain struct { + Certificate string `json:"certificate,omitempty"` + Key string `json:"key,omitempty"` + + LookupPaths []LookupPath `json:"lookup_paths"` +} diff --git a/internal/source/gitlab/cache/cache.go b/internal/source/gitlab/cache/cache.go new file mode 100644 index 000000000..37cef111b --- /dev/null +++ b/internal/source/gitlab/cache/cache.go @@ -0,0 +1,116 @@ +package cache + +import ( + "context" + "time" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +var defaultCacheConfig = cacheConfig{ + cacheExpiry: 10 * time.Minute, + entryRefreshTimeout: 60 * time.Second, + retrievalTimeout: 30 * time.Second, + maxRetrievalInterval: time.Second, + maxRetrievalRetries: 3, +} + +// Cache is a short and long caching mechanism for GitLab source +type Cache struct { + client api.Client + store Store +} + +type cacheConfig struct { + cacheExpiry time.Duration + entryRefreshTimeout time.Duration + retrievalTimeout time.Duration + maxRetrievalInterval time.Duration + maxRetrievalRetries int +} + +// NewCache creates a new instance of Cache. +func NewCache(client api.Client, cc *cacheConfig) *Cache { + if cc == nil { + cc = &defaultCacheConfig + } + + return &Cache{ + client: client, + store: newMemStore(client, cc), + } +} + +// Resolve is going to return a lookup based on a domain name. The caching +// algorithm works as follows: +// - We first check if the cache entry exists, and if it is up-to-date. If it +// is fresh we return the lookup entry from cache and it is a cache hit. +// - If entry is not up-to-date, that means it has been created in a cache +// more than `entryRefreshTimeout` duration ago, we schedule an asynchronous +// retrieval of the latest configuration we are going to obtain through the +// API, and we immediately return an old value, to avoid blocking clients. In +// this case it is also a cache hit. +// - If cache entry has not been populated with a lookup information yet, we +// block all the clients and make them wait until we retrieve the lookup from +// the GitLab API. Clients should not wait for longer than +// `retrievalTimeout`. It is a cache miss. +// +// We are going to retrieve a lookup from GitLab API using a retriever type. In +// case of failures (when GitLab API client returns an error) we will retry the +// operation a few times, waiting `maxRetrievalInterval` in between, total +// amount of requests is defined as `maxRetrievalRetries`. In case of an +// erroneous response, we will cache it, and it get recycled as every other +// cache entry. +// +// Examples: +// 1. Everything works +// - a client opens pages +// - we create a new cache entry +// - cache entry needs a warm up +// - a client waits until we retrieve a lookup +// - we successfully retrieve a lookup +// - we cache this response +// - and we pass it upstream to all clients +// 2. A domain does not exist +// - a client opens pages +// - we create a new cache entry +// - cache entry needs a warm up +// - a client waits until we retrieve a lookup +// - GitLab responded with a lookup and 204 HTTP status +// - we cache this response with domain being `nil` +// - we pass this lookup upstream to all the clients +// 3. GitLab is not responding +// - a client opens pages +// - we create a new cache entry +// - cache entry needs a warm up +// - a client waits until we retrieve a lookup +// - GitLab does not respond or responds with an error +// - we retry this retrieval every `maxRetrievalInterval` +// - we retry this retrieval `maxRetrievalRetries` in total +// - we create a lookup that contains information about an error +// - we cache this response +// - we pass this lookup upstream to all the clients +func (c *Cache) Resolve(ctx context.Context, domain string) *api.Lookup { + entry := c.store.LoadOrCreate(domain) + + if entry.IsUpToDate() { + metrics.DomainsSourceCacheHit.Inc() + return entry.Lookup() + } + + if entry.NeedsRefresh() { + entry.Refresh(c.client, c.store) + + metrics.DomainsSourceCacheHit.Inc() + return entry.Lookup() + } + + metrics.DomainsSourceCacheMiss.Inc() + return entry.Retrieve(ctx, c.client) +} + +// Status calls the client Status to check connectivity with the API +func (c *Cache) Status() error { + return c.client.Status() +} diff --git a/internal/source/gitlab/cache/cache_test.go b/internal/source/gitlab/cache/cache_test.go new file mode 100644 index 000000000..7ed56f5a6 --- /dev/null +++ b/internal/source/gitlab/cache/cache_test.go @@ -0,0 +1,229 @@ +package cache + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +type client struct { + counter uint64 + lookups chan uint64 + domain chan string + failure error +} + +func (c *client) GetLookup(ctx context.Context, _ string) api.Lookup { + lookup := api.Lookup{} + if c.failure == nil { + lookup.Name = <-c.domain + } else { + lookup.Error = c.failure + } + + c.lookups <- atomic.AddUint64(&c.counter, 1) + + return lookup +} + +func (c *client) Status() error { + return nil +} + +func withTestCache(config resolverConfig, cacheConfig *cacheConfig, block func(*Cache, *client)) { + var chanSize int + + if config.buffered { + chanSize = 1 + } else { + chanSize = 0 + } + + resolver := &client{ + domain: make(chan string, chanSize), + lookups: make(chan uint64, 100), + failure: config.failure, + } + + cache := NewCache(resolver, cacheConfig) + + block(cache, resolver) +} + +func (cache *Cache) withTestEntry(config entryConfig, block func(*Entry)) { + domain := "my.gitlab.com" + + if len(config.domain) > 0 { + domain = config.domain + } + + entry := cache.store.LoadOrCreate(domain) + + if config.retrieved { + entry.setResponse(api.Lookup{Name: domain}) + } + + if config.expired { + entry.created = time.Now().Add(-time.Hour) + } + + block(entry) +} + +type resolverConfig struct { + buffered bool + failure error +} + +type entryConfig struct { + domain string + expired bool + retrieved bool +} + +func TestResolve(t *testing.T) { + t.Run("when item is not cached", func(t *testing.T) { + withTestCache(resolverConfig{buffered: true}, nil, func(cache *Cache, resolver *client) { + require.Equal(t, 0, len(resolver.lookups)) + resolver.domain <- "my.gitlab.com" + + lookup := cache.Resolve(context.Background(), "my.gitlab.com") + + require.NoError(t, lookup.Error) + require.Equal(t, "my.gitlab.com", lookup.Name) + require.Equal(t, uint64(1), <-resolver.lookups) + }) + }) + + t.Run("when item is not cached and accessed multiple times", func(t *testing.T) { + withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { + wg := &sync.WaitGroup{} + ctx := context.Background() + + receiver := func() { + defer wg.Done() + cache.Resolve(ctx, "my.gitlab.com") + } + + wg.Add(3) + go receiver() + go receiver() + go receiver() + + require.Equal(t, 0, len(resolver.lookups)) + + resolver.domain <- "my.gitlab.com" + wg.Wait() + + require.Equal(t, uint64(1), <-resolver.lookups) + }) + }) + + t.Run("when item is in short cache", func(t *testing.T) { + withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { + cache.withTestEntry(entryConfig{expired: false, retrieved: true}, func(*Entry) { + lookup := cache.Resolve(context.Background(), "my.gitlab.com") + + require.Equal(t, "my.gitlab.com", lookup.Name) + require.Equal(t, 0, len(resolver.lookups)) + }) + }) + }) + + t.Run("when a non-retrieved new item is in short cache", func(t *testing.T) { + withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { + cache.withTestEntry(entryConfig{expired: false, retrieved: false}, func(*Entry) { + lookup := make(chan *api.Lookup, 1) + + go func() { + lookup <- cache.Resolve(context.Background(), "my.gitlab.com") + }() + + require.Equal(t, 0, len(resolver.lookups)) + + resolver.domain <- "my.gitlab.com" + <-lookup + + require.Equal(t, uint64(1), <-resolver.lookups) + }) + }) + }) + + t.Run("when item is in long cache only", func(t *testing.T) { + withTestCache(resolverConfig{buffered: false}, nil, func(cache *Cache, resolver *client) { + cache.withTestEntry(entryConfig{expired: true, retrieved: true}, func(*Entry) { + lookup := cache.Resolve(context.Background(), "my.gitlab.com") + + require.Equal(t, "my.gitlab.com", lookup.Name) + require.Equal(t, 0, len(resolver.lookups)) + + resolver.domain <- "my.gitlab.com" + + require.Equal(t, uint64(1), <-resolver.lookups) + }) + }) + }) + + t.Run("when item in long cache is requested multiple times", func(t *testing.T) { + withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { + cache.withTestEntry(entryConfig{expired: true, retrieved: true}, func(*Entry) { + cache.Resolve(context.Background(), "my.gitlab.com") + cache.Resolve(context.Background(), "my.gitlab.com") + cache.Resolve(context.Background(), "my.gitlab.com") + + require.Equal(t, 0, len(resolver.lookups)) + + resolver.domain <- "my.gitlab.com" + + require.Equal(t, uint64(1), <-resolver.lookups) + }) + }) + }) + + t.Run("when retrieval failed with an error", func(t *testing.T) { + cc := defaultCacheConfig + cc.maxRetrievalInterval = 0 + err := errors.New("500 error") + + withTestCache(resolverConfig{failure: err}, &cc, func(cache *Cache, resolver *client) { + lookup := cache.Resolve(context.Background(), "my.gitlab.com") + + require.Equal(t, 3, len(resolver.lookups)) + require.EqualError(t, lookup.Error, "500 error") + }) + }) + + t.Run("when retrieval failed because of an internal retriever context timeout", func(t *testing.T) { + cc := defaultCacheConfig + cc.retrievalTimeout = 0 + + withTestCache(resolverConfig{}, &cc, func(cache *Cache, resolver *client) { + lookup := cache.Resolve(context.Background(), "my.gitlab.com") + + require.Equal(t, 0, len(resolver.lookups)) + require.EqualError(t, lookup.Error, "retrieval context done") + }) + }) + + t.Run("when retrieval failed because of resolution context being canceled", func(t *testing.T) { + withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { + cache.withTestEntry(entryConfig{expired: false, retrieved: false}, func(entry *Entry) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + lookup := cache.Resolve(ctx, "my.gitlab.com") + resolver.domain <- "err.gitlab.com" + + require.Equal(t, "my.gitlab.com", lookup.Name) + require.EqualError(t, lookup.Error, "context done") + }) + }) + }) +} diff --git a/internal/source/gitlab/cache/entry.go b/internal/source/gitlab/cache/entry.go new file mode 100644 index 000000000..c960be8ac --- /dev/null +++ b/internal/source/gitlab/cache/entry.go @@ -0,0 +1,109 @@ +package cache + +import ( + "context" + "errors" + "sync" + "time" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +// Entry represents a cache object that can be retrieved asynchronously and +// holds a pointer to *api.Lookup when the domain lookup has been retrieved +// successfully +type Entry struct { + domain string + created time.Time + retrieve *sync.Once + refresh *sync.Once + mux *sync.RWMutex + retrieved chan struct{} + response *api.Lookup + refreshTimeout time.Duration + retriever *Retriever +} + +func newCacheEntry(domain string, refreshTimeout time.Duration, retriever *Retriever) *Entry { + return &Entry{ + domain: domain, + created: time.Now(), + retrieve: &sync.Once{}, + refresh: &sync.Once{}, + mux: &sync.RWMutex{}, + retrieved: make(chan struct{}), + refreshTimeout: refreshTimeout, + retriever: retriever, + } +} + +// IsUpToDate returns true if the entry has been resolved correctly and has not +// expired yet. False otherwise. +func (e *Entry) IsUpToDate() bool { + e.mux.RLock() + defer e.mux.RUnlock() + + return e.isResolved() && !e.isExpired() +} + +// NeedsRefresh return true if the entry has been resolved correctly but it has +// expired since then. +func (e *Entry) NeedsRefresh() bool { + e.mux.RLock() + defer e.mux.RUnlock() + + return e.isResolved() && e.isExpired() +} + +// Lookup returns a retriever Lookup response. +func (e *Entry) Lookup() *api.Lookup { + e.mux.RLock() + defer e.mux.RUnlock() + + return e.response +} + +// Retrieve perform a blocking retrieval of the cache entry response. +func (e *Entry) Retrieve(ctx context.Context, client api.Client) (lookup *api.Lookup) { + // We run the code within an additional func() to run both `e.setResponse` + // and `e.retrieve.Retrieve` asynchronously. + e.retrieve.Do(func() { go func() { e.setResponse(e.retriever.Retrieve(e.domain)) }() }) + + select { + case <-ctx.Done(): + lookup = &api.Lookup{Name: e.domain, Error: errors.New("context done")} + case <-e.retrieved: + lookup = e.Lookup() + } + + return lookup +} + +// Refresh will update the entry in the store only when it gets resolved. +func (e *Entry) Refresh(client api.Client, store Store) { + e.refresh.Do(func() { + go func() { + entry := newCacheEntry(e.domain, e.refreshTimeout, e.retriever) + + entry.Retrieve(context.Background(), client) + + store.ReplaceOrCreate(e.domain, entry) + }() + }) +} + +func (e *Entry) setResponse(lookup api.Lookup) { + e.mux.Lock() + defer e.mux.Unlock() + + e.response = &lookup + close(e.retrieved) +} + +func (e *Entry) isExpired() bool { + return time.Since(e.created) > e.refreshTimeout +} + +func (e *Entry) isResolved() bool { + return e.response != nil +} diff --git a/internal/source/gitlab/cache/entry_test.go b/internal/source/gitlab/cache/entry_test.go new file mode 100644 index 000000000..e6a865574 --- /dev/null +++ b/internal/source/gitlab/cache/entry_test.go @@ -0,0 +1,64 @@ +package cache + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +func TestIsUpToDateAndNeedsRefresh(t *testing.T) { + tests := []struct { + name string + resolved bool + expired bool + expectedIsUpToDate bool + expectedNeedRefresh bool + }{ + { + name: "resolved_and_not_expired", + resolved: true, + expired: false, + expectedIsUpToDate: true, + expectedNeedRefresh: false, + }, + { + name: "resolved_and_expired", + resolved: true, + expired: true, + expectedIsUpToDate: false, + expectedNeedRefresh: true, + }, + { + name: "not_resolved_and_not_expired", + resolved: false, + expired: false, + expectedIsUpToDate: false, + expectedNeedRefresh: false, + }, + { + name: "not_resolved_and_expired", + resolved: false, + expired: true, + expectedIsUpToDate: false, + expectedNeedRefresh: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + entry := newCacheEntry("my.gitlab.com", defaultCacheConfig.entryRefreshTimeout, nil) + if tt.resolved { + entry.response = &api.Lookup{} + } + if tt.expired { + entry.created = time.Now().Add(-time.Hour) + } + + require.Equal(t, tt.expectedIsUpToDate, entry.IsUpToDate()) + require.Equal(t, tt.expectedNeedRefresh, entry.NeedsRefresh()) + }) + } +} diff --git a/internal/source/gitlab/cache/memstore.go b/internal/source/gitlab/cache/memstore.go new file mode 100644 index 000000000..1d7c678dd --- /dev/null +++ b/internal/source/gitlab/cache/memstore.go @@ -0,0 +1,61 @@ +package cache + +import ( + "sync" + "time" + + cache "github.com/patrickmn/go-cache" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +type memstore struct { + store *cache.Cache + mux *sync.RWMutex + retriever *Retriever + entryRefreshTimeout time.Duration +} + +func newMemStore(client api.Client, cc *cacheConfig) Store { + retriever := NewRetriever(client, cc.retrievalTimeout, cc.maxRetrievalInterval, cc.maxRetrievalRetries) + return &memstore{ + store: cache.New(cc.cacheExpiry, time.Minute), + mux: &sync.RWMutex{}, + retriever: retriever, + entryRefreshTimeout: cc.entryRefreshTimeout, + } +} + +// LoadOrCreate writes or retrieves a domain entry from the cache in a +// thread-safe way, trying to make this read-preferring RW locking. +func (m *memstore) LoadOrCreate(domain string) *Entry { + m.mux.RLock() + entry, exists := m.store.Get(domain) + m.mux.RUnlock() + + if exists { + return entry.(*Entry) + } + + m.mux.Lock() + defer m.mux.Unlock() + + if entry, exists = m.store.Get(domain); exists { + return entry.(*Entry) + } + + newEntry := newCacheEntry(domain, m.entryRefreshTimeout, m.retriever) + m.store.SetDefault(domain, newEntry) + + return newEntry +} + +func (m *memstore) ReplaceOrCreate(domain string, entry *Entry) *Entry { + m.mux.Lock() + defer m.mux.Unlock() + + m.store.Delete(domain) + m.store.SetDefault(domain, entry) + + return entry +} diff --git a/internal/source/gitlab/cache/retriever.go b/internal/source/gitlab/cache/retriever.go new file mode 100644 index 000000000..edd8663e0 --- /dev/null +++ b/internal/source/gitlab/cache/retriever.go @@ -0,0 +1,116 @@ +package cache + +import ( + "context" + "errors" + "sync" + + "time" + + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +// Retriever is an utility type that performs an HTTP request with backoff in +// case of errors +type Retriever struct { + timer timer + client api.Client + retrievalTimeout time.Duration + maxRetrievalInterval time.Duration + maxRetrievalRetries int +} + +type timer struct { + mu *sync.Mutex + stopped bool + timer *time.Timer +} + +// NewRetriever creates a Retriever with a client +func NewRetriever(client api.Client, retrievalTimeout, maxRetrievalInterval time.Duration, maxRetrievalRetries int) *Retriever { + return &Retriever{ + timer: timer{ + mu: &sync.Mutex{}, + }, + client: client, + retrievalTimeout: retrievalTimeout, + maxRetrievalInterval: maxRetrievalInterval, + maxRetrievalRetries: maxRetrievalRetries, + } +} + +// Retrieve retrieves a lookup response from external source with timeout and +// backoff. It has its own context with timeout. +func (r *Retriever) Retrieve(domain string) (lookup api.Lookup) { + ctx, cancel := context.WithTimeout(context.Background(), r.retrievalTimeout) + defer cancel() + + select { + case <-ctx.Done(): + log.Debug("retrieval context done") + lookup = api.Lookup{Error: errors.New("retrieval context done")} + case lookup = <-r.resolveWithBackoff(ctx, domain): + log.Debug("retrieval response sent") + } + + return lookup +} + +func (r *Retriever) resolveWithBackoff(ctx context.Context, domainName string) <-chan api.Lookup { + response := make(chan api.Lookup) + + go func() { + var lookup api.Lookup + + Retry: + for i := 1; i <= r.maxRetrievalRetries; i++ { + lookup = r.client.GetLookup(ctx, domainName) + + if lookup.Error == nil || errors.Is(lookup.Error, domain.ErrDomainDoesNotExist) { + r.timer.start(r.maxRetrievalInterval) + select { + case <-r.timer.timer.C: + // retry to GetLookup + continue Retry + case <-ctx.Done(): + log.WithError(ctx.Err()).Debug("domain retrieval backoff canceled by context") + // when the retrieval context is done we stop the timer + r.timer.stop() + break Retry + } + } else { + break + } + } + + response <- lookup + close(response) + }() + + return response +} + +func (t *timer) start(d time.Duration) { + t.mu.Lock() + defer t.mu.Unlock() + + t.stopped = false + t.timer = time.NewTimer(d) +} + +func (t *timer) stop() { + t.mu.Lock() + defer t.mu.Unlock() + + t.stopped = t.timer.Stop() +} + +func (t *timer) hasStopped() bool { + t.mu.Lock() + defer t.mu.Unlock() + + return t.stopped +} diff --git a/internal/source/gitlab/cache/retriever_test.go b/internal/source/gitlab/cache/retriever_test.go new file mode 100644 index 000000000..774e9779e --- /dev/null +++ b/internal/source/gitlab/cache/retriever_test.go @@ -0,0 +1,27 @@ +package cache + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestRetrieveTimerStopsWhenContextIsDone(t *testing.T) { + retrievalTimeout := time.Millisecond // quick timeout that cancels inner context + maxRetrievalInterval := time.Minute // long sleep inside resolveWithBackoff + + resolver := &client{ + domain: make(chan string), + lookups: make(chan uint64, 1), + failure: errors.New("500 error"), + } + + retriever := NewRetriever(resolver, retrievalTimeout, maxRetrievalInterval, 3) + require.False(t, retriever.timer.hasStopped(), "timer has not been stopped yet") + + lookup := retriever.Retrieve("my.gitlab.com") + require.Empty(t, lookup.Name) + require.Eventually(t, retriever.timer.hasStopped, time.Second, time.Millisecond, "timer must have been stopped") +} diff --git a/internal/source/gitlab/cache/store.go b/internal/source/gitlab/cache/store.go new file mode 100644 index 000000000..3cf8aac1d --- /dev/null +++ b/internal/source/gitlab/cache/store.go @@ -0,0 +1,7 @@ +package cache + +// Store defines an interface describing an abstract cache store +type Store interface { + LoadOrCreate(domain string) *Entry + ReplaceOrCreate(domain string, entry *Entry) *Entry +} diff --git a/internal/source/gitlab/client/client.go b/internal/source/gitlab/client/client.go new file mode 100644 index 000000000..524eb8a43 --- /dev/null +++ b/internal/source/gitlab/client/client.go @@ -0,0 +1,209 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/dgrijalva/jwt-go" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/httptransport" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// ConnectionErrorMsg to be returned with `gc.Status` if Pages +// fails to connect to the internal GitLab API, times out +// or a 401 given that the credentials used are wrong +const ConnectionErrorMsg = "failed to connect to internal Pages API" + +// Client is a HTTP client to access Pages internal API +type Client struct { + secretKey []byte + baseURL *url.URL + httpClient *http.Client + jwtTokenExpiry time.Duration +} + +// NewClient initializes and returns new Client baseUrl is +// appConfig.InternalGitLabServer secretKey is appConfig.GitLabAPISecretKey +func NewClient(baseURL string, secretKey []byte, connectionTimeout, jwtTokenExpiry time.Duration) (*Client, error) { + if len(baseURL) == 0 || len(secretKey) == 0 { + return nil, errors.New("GitLab API URL or API secret has not been provided") + } + + parsedURL, err := url.Parse(baseURL) + if err != nil { + return nil, err + } + + if connectionTimeout == 0 { + return nil, errors.New("GitLab HTTP client connection timeout has not been provided") + } + + if jwtTokenExpiry == 0 { + return nil, errors.New("GitLab JWT token expiry has not been provided") + } + + return &Client{ + secretKey: secretKey, + baseURL: parsedURL, + httpClient: &http.Client{ + Timeout: connectionTimeout, + Transport: httptransport.NewTransportWithMetrics( + "gitlab_internal_api", + metrics.DomainsSourceAPITraceDuration, + metrics.DomainsSourceAPICallDuration, + metrics.DomainsSourceAPIReqTotal, + httptransport.DefaultTTFBTimeout, + ), + }, + jwtTokenExpiry: jwtTokenExpiry, + }, nil +} + +// NewFromConfig creates a new client from Config struct +func NewFromConfig(config Config) (*Client, error) { + return NewClient(config.InternalGitLabServerURL(), config.GitlabAPISecret(), config.GitlabClientConnectionTimeout(), config.GitlabJWTTokenExpiry()) +} + +// Resolve returns a VirtualDomain configuration wrapped into a Lookup for a +// given host. It implements api.Resolve type. +func (gc *Client) Resolve(ctx context.Context, host string) *api.Lookup { + lookup := gc.GetLookup(ctx, host) + + return &lookup +} + +// GetLookup returns a VirtualDomain configuration wrapped into a Lookup for a +// given host +func (gc *Client) GetLookup(ctx context.Context, host string) api.Lookup { + params := url.Values{} + params.Set("host", host) + + resp, err := gc.get(ctx, "/api/v4/internal/pages", params) + if err != nil { + return api.Lookup{Name: host, Error: err} + } + + if resp == nil { + return api.Lookup{Name: host, Error: domain.ErrDomainDoesNotExist} + } + + // ensure that entire response body has been read and close it, to make it + // possible to reuse HTTP connection. In case of a JSON being invalid and + // larger than 512 bytes, the response body will not be closed properly, thus + // we need to close it manually in every case. + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + + lookup := api.Lookup{Name: host} + lookup.Error = json.NewDecoder(resp.Body).Decode(&lookup.Domain) + + return lookup +} + +// Status checks that Pages can reach the rails internal Pages API +// for source domain configuration. +// Timeout is the same as -gitlab-client-http-timeout +func (gc *Client) Status() error { + res, err := gc.get(context.Background(), "/api/v4/internal/pages/status", url.Values{}) + if err != nil { + return fmt.Errorf("%s: %v", ConnectionErrorMsg, err) + } + + if res != nil && res.Body != nil { + res.Body.Close() + } + + return nil +} + +func (gc *Client) get(ctx context.Context, path string, params url.Values) (*http.Response, error) { + endpoint, err := gc.endpoint(path, params) + if err != nil { + return nil, err + } + + req, err := gc.request(ctx, "GET", endpoint) + if err != nil { + return nil, err + } + + resp, err := gc.httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp == nil { + return nil, errors.New("unknown response") + } + + // StatusOK means we should return the API response + if resp.StatusCode == http.StatusOK { + return resp, nil + } + + // nolint: errcheck + // best effort to discard and close the response body + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + + // StatusNoContent means that a domain does not exist, it is not an error + if resp.StatusCode == http.StatusNoContent { + return nil, nil + } + + return nil, fmt.Errorf("HTTP status: %d", resp.StatusCode) +} + +func (gc *Client) endpoint(path string, params url.Values) (*url.URL, error) { + endpoint, err := gc.baseURL.Parse(path) + if err != nil { + return nil, err + } + + endpoint.RawQuery = params.Encode() + + return endpoint, nil +} + +func (gc *Client) request(ctx context.Context, method string, endpoint *url.URL) (*http.Request, error) { + req, err := http.NewRequest(method, endpoint.String(), nil) + if err != nil { + return nil, err + } + + req = req.WithContext(ctx) + + token, err := gc.token() + if err != nil { + return nil, err + } + req.Header.Set("Gitlab-Pages-Api-Request", token) + + return req, nil +} + +func (gc *Client) token() (string, error) { + claims := jwt.StandardClaims{ + Issuer: "gitlab-pages", + ExpiresAt: time.Now().UTC().Add(gc.jwtTokenExpiry).Unix(), + } + + token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(gc.secretKey) + if err != nil { + return "", err + } + + return token, nil +} diff --git a/internal/source/gitlab/client/client_stub.go b/internal/source/gitlab/client/client_stub.go new file mode 100644 index 000000000..de6161e62 --- /dev/null +++ b/internal/source/gitlab/client/client_stub.go @@ -0,0 +1,42 @@ +package client + +import ( + "context" + "encoding/json" + "os" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +// StubClient is a stubbed client used for testing +type StubClient struct { + File string + StatusErr func() error +} + +// Resolve implements api.Resolver +func (c StubClient) Resolve(ctx context.Context, host string) *api.Lookup { + lookup := c.GetLookup(ctx, host) + + return &lookup +} + +// GetLookup reads a test fixture and unmarshalls it +func (c StubClient) GetLookup(ctx context.Context, host string) api.Lookup { + lookup := api.Lookup{Name: host} + + f, err := os.Open(c.File) + if err != nil { + lookup.Error = err + return lookup + } + defer f.Close() + + lookup.Error = json.NewDecoder(f).Decode(&lookup.Domain) + + return lookup +} + +func (c StubClient) Status() error { + return c.StatusErr() +} diff --git a/internal/source/gitlab/client/client_test.go b/internal/source/gitlab/client/client_test.go new file mode 100644 index 000000000..153b9372e --- /dev/null +++ b/internal/source/gitlab/client/client_test.go @@ -0,0 +1,354 @@ +package client + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "net/http/httptrace" + "testing" + "time" + + "github.com/dgrijalva/jwt-go" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/fixture" +) + +const ( + defaultClientConnTimeout = 10 * time.Second + defaultJWTTokenExpiry = 30 * time.Second +) + +func TestConnectionReuse(t *testing.T) { + mux := http.NewServeMux() + + mux.HandleFunc("/api/v4/internal/pages", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + + // we want to test for an invalid JSON that is larger than 512 bytes + b := make([]byte, 513) + for i := range b { + b[i] = 'x' + } + + w.Write(b) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + client := defaultClient(t, server.URL) + reused := make(chan bool, 2) + + trace := &httptrace.ClientTrace{ + GotConn: func(connInfo httptrace.GotConnInfo) { + reused <- connInfo.Reused + }, + } + + ctx := httptrace.WithClientTrace(context.Background(), trace) + client.GetLookup(ctx, "group.gitlab.io") + client.GetLookup(ctx, "group.gitlab.io") + + require.False(t, <-reused) + require.True(t, <-reused) +} + +func TestNewValidBaseURL(t *testing.T) { + _, err := NewClient("https://gitlab.com", secretKey(t), defaultClientConnTimeout, defaultJWTTokenExpiry) + require.NoError(t, err) +} + +func TestNewInvalidConfiguration(t *testing.T) { + type args struct { + baseURL string + secretKey []byte + connectionTimeout time.Duration + jwtTokenExpiry time.Duration + } + + tests := []struct { + name string + args args + wantErrMsg string + }{ + { + name: "invalid_api_url", + args: args{ + baseURL: "%", + secretKey: secretKey(t), + connectionTimeout: defaultClientConnTimeout, + jwtTokenExpiry: defaultJWTTokenExpiry, + }, + wantErrMsg: "invalid URL escape \"%\"", + }, + { + name: "invalid_api_url_empty", + args: args{ + baseURL: "", + secretKey: secretKey(t), + connectionTimeout: defaultClientConnTimeout, + jwtTokenExpiry: defaultJWTTokenExpiry, + }, + wantErrMsg: "GitLab API URL or API secret has not been provided", + }, + { + name: "invalid_api_secret_empty", + args: args{ + baseURL: "https://gitlab.com", + secretKey: []byte{}, + connectionTimeout: defaultClientConnTimeout, + jwtTokenExpiry: defaultJWTTokenExpiry, + }, + wantErrMsg: "GitLab API URL or API secret has not been provided", + }, + { + name: "invalid_http_client_timeout", + args: args{ + baseURL: "https://gitlab.com", + secretKey: secretKey(t), + connectionTimeout: 0, + jwtTokenExpiry: defaultJWTTokenExpiry, + }, + wantErrMsg: "GitLab HTTP client connection timeout has not been provided", + }, + { + name: "invalid_jwt_token_expiry", + args: args{ + baseURL: "https://gitlab.com", + secretKey: secretKey(t), + connectionTimeout: defaultClientConnTimeout, + jwtTokenExpiry: 0, + }, + wantErrMsg: "GitLab JWT token expiry has not been provided", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewClient(tt.args.baseURL, tt.args.secretKey, tt.args.connectionTimeout, tt.args.jwtTokenExpiry) + require.Nil(t, got) + require.NotNil(t, err) + require.Contains(t, err.Error(), tt.wantErrMsg) + }) + } +} +func TestLookupForErrorResponses(t *testing.T) { + tests := map[int]string{ + http.StatusUnauthorized: "HTTP status: 401", + http.StatusNotFound: "HTTP status: 404", + } + + for statusCode, expectedError := range tests { + name := fmt.Sprintf("%d %s", statusCode, expectedError) + t.Run(name, func(t *testing.T) { + mux := http.NewServeMux() + + mux.HandleFunc("/api/v4/internal/pages", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(statusCode) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + client := defaultClient(t, server.URL) + + lookup := client.GetLookup(context.Background(), "group.gitlab.io") + + require.EqualError(t, lookup.Error, expectedError) + require.Nil(t, lookup.Domain) + }) + } +} + +func TestMissingDomain(t *testing.T) { + mux := http.NewServeMux() + + mux.HandleFunc("/api/v4/internal/pages", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + client := defaultClient(t, server.URL) + + lookup := client.GetLookup(context.Background(), "group.gitlab.io") + + require.True(t, errors.Is(lookup.Error, domain.ErrDomainDoesNotExist)) + require.Nil(t, lookup.Domain) +} + +func TestGetVirtualDomainAuthenticatedRequest(t *testing.T) { + mux := http.NewServeMux() + + mux.HandleFunc("/api/v4/internal/pages", func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "GET", r.Method) + require.Equal(t, "group.gitlab.io", r.FormValue("host")) + + validateToken(t, r.Header.Get("Gitlab-Pages-Api-Request")) + + response := `{ + "certificate": "foo", + "key": "bar", + "lookup_paths": [ + { + "project_id": 123, + "access_control": false, + "source": { + "type": "file", + "path": "mygroup/myproject/public/" + }, + "https_only": true, + "prefix": "/myproject/" + } + ] + }` + + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, response) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + client := defaultClient(t, server.URL) + + lookup := client.GetLookup(context.Background(), "group.gitlab.io") + require.NoError(t, lookup.Error) + + require.Equal(t, "foo", lookup.Domain.Certificate) + require.Equal(t, "bar", lookup.Domain.Key) + + lookupPath := lookup.Domain.LookupPaths[0] + require.Equal(t, 123, lookupPath.ProjectID) + require.Equal(t, false, lookupPath.AccessControl) + require.Equal(t, true, lookupPath.HTTPSOnly) + require.Equal(t, "/myproject/", lookupPath.Prefix) + + require.Equal(t, "file", lookupPath.Source.Type) + require.Equal(t, "mygroup/myproject/public/", lookupPath.Source.Path) +} + +func TestClientStatus(t *testing.T) { + tests := []struct { + name string + status int + wantErr bool + }{ + { + name: "api_enabled", + status: http.StatusNoContent, + }, + { + name: "api_unauthorized", + status: http.StatusUnauthorized, + wantErr: true, + }, + { + name: "server_error", + status: http.StatusInternalServerError, + wantErr: true, + }, + { + name: "gateway_timeout", + status: http.StatusGatewayTimeout, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/api/v4/internal/pages/status", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.status) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + client := defaultClient(t, server.URL) + + err := client.Status() + if tt.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), ConnectionErrorMsg) + return + } + + require.NoError(t, err) + }) + } +} + +func TestClientStatusClientTimeout(t *testing.T) { + timeout := 20 * time.Millisecond + + mux := http.NewServeMux() + mux.HandleFunc("/api/v4/internal/pages/status", func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout * 3) + + w.WriteHeader(http.StatusOK) + }) + + server := httptest.NewServer(mux) + defer server.Close() + + client := defaultClient(t, server.URL) + client.httpClient.Timeout = timeout + + err := client.Status() + require.Error(t, err) + // we can receive any of these messages + // - context deadline exceeded (Client.Timeout exceeded while awaiting headers) + // - net/http: request canceled (Client.Timeout exceeded while awaiting headers) + // - context deadline exceeded + require.Contains(t, err.Error(), "exceeded") +} + +func TestClientStatusConnectionRefused(t *testing.T) { + client := defaultClient(t, "http://localhost:1234") + + err := client.Status() + require.Error(t, err) + require.Contains(t, err.Error(), "connection refused") +} + +func validateToken(t *testing.T, tokenString string) { + t.Helper() + token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + + return secretKey(t), nil + }) + require.NoError(t, err) + + claims, ok := token.Claims.(jwt.MapClaims) + require.True(t, ok) + require.True(t, token.Valid) + require.NotNil(t, claims["exp"]) + require.Equal(t, "gitlab-pages", claims["iss"]) +} + +func secretKey(t *testing.T) []byte { + t.Helper() + + secretKey, err := base64.StdEncoding.DecodeString(fixture.GitLabAPISecretKey) + require.NoError(t, err) + + return secretKey +} + +func defaultClient(t *testing.T, url string) *Client { + t.Helper() + + client, err := NewClient(url, secretKey(t), defaultClientConnTimeout, defaultJWTTokenExpiry) + require.NoError(t, err) + + return client +} diff --git a/internal/source/gitlab/client/config.go b/internal/source/gitlab/client/config.go new file mode 100644 index 000000000..bd9aa0619 --- /dev/null +++ b/internal/source/gitlab/client/config.go @@ -0,0 +1,13 @@ +package client + +import "time" + +// Config represents an interface that is configuration provider for client +// capable of comunicating with GitLab +type Config interface { + InternalGitLabServerURL() string + GitlabAPISecret() []byte + GitlabClientConnectionTimeout() time.Duration + GitlabJWTTokenExpiry() time.Duration + DomainConfigSource() string +} diff --git a/internal/source/gitlab/client/testdata/test.gitlab.io.json b/internal/source/gitlab/client/testdata/test.gitlab.io.json new file mode 100644 index 000000000..e3430119b --- /dev/null +++ b/internal/source/gitlab/client/testdata/test.gitlab.io.json @@ -0,0 +1,36 @@ +{ + "certificate": "some--cert", + "key": "some--key", + "lookup_paths": [ + { + "access_control": false, + "https_only": true, + "prefix": "/my/pages/project/", + "project_id": 123, + "source": { + "path": "some/path/to/project/", + "type": "file" + } + }, + { + "access_control": false, + "https_only": true, + "prefix": "/my/second-project/", + "project_id": 124, + "source": { + "path": "some/path/to/project-2/", + "type": "file" + } + }, + { + "access_control": false, + "https_only": true, + "prefix": "/", + "project_id": 125, + "source": { + "path": "some/path/to/project-3/", + "type": "file" + } + } + ] +} diff --git a/internal/source/gitlab/factory.go b/internal/source/gitlab/factory.go new file mode 100644 index 000000000..b033a592a --- /dev/null +++ b/internal/source/gitlab/factory.go @@ -0,0 +1,57 @@ +package gitlab + +import ( + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/local" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk/zip" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +// fabricateLookupPath fabricates a serving LookupPath based on the API LookupPath +// `size` argument is DEPRECATED, see +// https://gitlab.com/gitlab-org/gitlab-pages/issues/272 +func fabricateLookupPath(size int, lookup api.LookupPath) *serving.LookupPath { + return &serving.LookupPath{ + ServingType: lookup.Source.Type, + Path: lookup.Source.Path, + Prefix: lookup.Prefix, + IsNamespaceProject: (lookup.Prefix == "/" && size > 1), + IsHTTPSOnly: lookup.HTTPSOnly, + HasAccessControl: lookup.AccessControl, + ProjectID: uint64(lookup.ProjectID), + } +} + +// fabricateServing fabricates serving based on the GitLab API response +func fabricateServing(lookup api.LookupPath) serving.Serving { + source := lookup.Source + + switch source.Type { + case "file": + return local.Instance() + case "zip": + return zip.Instance() + case "serverless": + log.Errorf("attempted to fabricate serverless serving for project %d", lookup.ProjectID) + + // This feature has been disalbed, for more details see + // https://gitlab.com/gitlab-org/gitlab-pages/-/issues/467 + // + // serving, err := serverless.NewFromAPISource(source.Serverless) + // if err != nil { + // log.WithError(err).Errorf("could not fabricate serving for project %d", lookup.ProjectID) + // + // break + // } + // + // return serving + } + + return defaultServing() +} + +func defaultServing() serving.Serving { + return local.Instance() +} diff --git a/internal/source/gitlab/factory_test.go b/internal/source/gitlab/factory_test.go new file mode 100644 index 000000000..46740d354 --- /dev/null +++ b/internal/source/gitlab/factory_test.go @@ -0,0 +1,65 @@ +package gitlab + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/fixture" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving/disk" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" +) + +func TestFabricateLookupPath(t *testing.T) { + t.Run("when lookup path is not a namespace project", func(t *testing.T) { + lookup := api.LookupPath{Prefix: "/something"} + + path := fabricateLookupPath(1, lookup) + + require.Equal(t, path.Prefix, "/something") + require.False(t, path.IsNamespaceProject) + }) + + t.Run("when lookup path is a namespace project", func(t *testing.T) { + lookup := api.LookupPath{Prefix: "/"} + + path := fabricateLookupPath(2, lookup) + + require.Equal(t, path.Prefix, "/") + require.True(t, path.IsNamespaceProject) + }) +} + +func TestFabricateServing(t *testing.T) { + t.Run("when lookup path requires disk serving", func(t *testing.T) { + lookup := api.LookupPath{ + Prefix: "/", + Source: api.Source{Type: "file"}, + } + + require.IsType(t, &disk.Disk{}, fabricateServing(lookup)) + }) + + t.Run("when lookup path requires serverless serving", func(t *testing.T) { + lookup := api.LookupPath{ + Prefix: "/", + Source: api.Source{ + Type: "serverless", + Serverless: api.Serverless{ + Service: "my-func.knative.example.com", + Cluster: api.Cluster{ + Address: "127.0.0.10", + Port: "443", + Hostname: "my-cluster.example.com", + CertificateCert: fixture.Certificate, + CertificateKey: fixture.Key, + }, + }, + }, + } + + // Serverless serving has been deprecated. + // require.IsType(t, &serverless.Serverless{}, fabricateServing(lookup)) + require.IsType(t, &disk.Disk{}, fabricateServing(lookup)) + }) +} diff --git a/internal/source/gitlab/gitlab.go b/internal/source/gitlab/gitlab.go new file mode 100644 index 000000000..d164460ab --- /dev/null +++ b/internal/source/gitlab/gitlab.go @@ -0,0 +1,102 @@ +package gitlab + +import ( + "context" + "net/http" + "path" + "strings" + "sync" + + "github.com/cenkalti/backoff/v4" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + "gitlab.com/gitlab-org/gitlab-pages/internal/request" + "gitlab.com/gitlab-org/gitlab-pages/internal/serving" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/cache" + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/client" +) + +// Gitlab source represent a new domains configuration source. We fetch all the +// information about domains from GitLab instance. +type Gitlab struct { + client api.Resolver + mu *sync.RWMutex + isReady bool +} + +// New returns a new instance of gitlab domain source. +func New(config client.Config) (*Gitlab, error) { + client, err := client.NewFromConfig(config) + if err != nil { + return nil, err + } + + g := &Gitlab{ + client: cache.NewCache(client, nil), + mu: &sync.RWMutex{}, + } + + go g.poll(backoff.DefaultInitialInterval, maxPollingTime) + + // using nil for cache config will use the default values specified in internal/source/gitlab/cache/cache.go#12 + return g, nil +} + +// GetDomain return a representation of a domain that we have fetched from +// GitLab +func (g *Gitlab) GetDomain(name string) (*domain.Domain, error) { + lookup := g.client.Resolve(context.Background(), name) + + if lookup.Error != nil { + return nil, lookup.Error + } + + // TODO introduce a second-level cache for domains, invalidate using etags + // from first-level cache + d := domain.New(name, lookup.Domain.Certificate, lookup.Domain.Key, g) + + return d, nil +} + +// Resolve is supposed to return the serving request containing lookup path, +// subpath for a given lookup and the serving itself created based on a request +// from GitLab pages domains source +func (g *Gitlab) Resolve(r *http.Request) (*serving.Request, error) { + host := request.GetHostWithoutPort(r) + + response := g.client.Resolve(r.Context(), host) + if response.Error != nil { + return nil, response.Error + } + + urlPath := path.Clean(r.URL.Path) + size := len(response.Domain.LookupPaths) + + for _, lookup := range response.Domain.LookupPaths { + isSubPath := strings.HasPrefix(urlPath, lookup.Prefix) + isRootPath := urlPath == path.Clean(lookup.Prefix) + + if isSubPath || isRootPath { + subPath := "" + if isSubPath { + subPath = strings.TrimPrefix(urlPath, lookup.Prefix) + } + + return &serving.Request{ + Serving: fabricateServing(lookup), + LookupPath: fabricateLookupPath(size, lookup), + SubPath: subPath}, nil + } + } + + return nil, domain.ErrDomainDoesNotExist +} + +// IsReady returns the value of Gitlab `isReady` which is updated by `Poll`. +func (g *Gitlab) IsReady() bool { + g.mu.RLock() + defer g.mu.RUnlock() + + return g.isReady +} diff --git a/internal/source/gitlab/gitlab_poll.go b/internal/source/gitlab/gitlab_poll.go new file mode 100644 index 000000000..0284449a7 --- /dev/null +++ b/internal/source/gitlab/gitlab_poll.go @@ -0,0 +1,40 @@ +package gitlab + +import ( + "time" + + "github.com/cenkalti/backoff/v4" + log "github.com/sirupsen/logrus" +) + +const ( + // maxPollingTime is the maximum duration to try to call the Status API + maxPollingTime = 60 * time.Minute +) + +// Poll tries to call the /internal/pages/status API endpoint once plus +// for `maxElapsedTime` +// TODO: Remove in https://gitlab.com/gitlab-org/gitlab-pages/-/issues/449 +func (g *Gitlab) poll(interval, maxElapsedTime time.Duration) { + backOff := backoff.NewExponentialBackOff() + backOff.InitialInterval = interval + backOff.MaxElapsedTime = maxElapsedTime + + operation := func() error { + log.Info("Checking GitLab internal API availability") + + return g.client.Status() + } + + err := backoff.Retry(operation, backOff) + if err != nil { + log.WithError(err).Errorf("Failed to connect to the internal GitLab API after %.2fs", maxElapsedTime.Seconds()) + return + } + + g.mu.Lock() + g.isReady = true + g.mu.Unlock() + + log.Info("GitLab internal pages status API connected successfully") +} diff --git a/internal/source/gitlab/gitlab_poll_test.go b/internal/source/gitlab/gitlab_poll_test.go new file mode 100644 index 000000000..8eecf210f --- /dev/null +++ b/internal/source/gitlab/gitlab_poll_test.go @@ -0,0 +1,80 @@ +package gitlab + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/client" +) + +func TestClient_Poll(t *testing.T) { + hook := test.NewGlobal() + tests := []struct { + name string + retries int + maxTime time.Duration + expectedFail bool + }{ + { + name: "success_with_no_retry", + retries: 0, + maxTime: 10 * time.Millisecond, + expectedFail: false, + }, + { + name: "success_after_N_retries", + retries: 3, + maxTime: 30 * time.Millisecond, + expectedFail: false, + }, + { + name: "fail_with_no_retries", + retries: 0, + maxTime: 10 * time.Millisecond, + expectedFail: true, + }, + { + name: "fail_after_N_retries", + retries: 3, + maxTime: 30 * time.Millisecond, + expectedFail: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer hook.Reset() + var counter int + client := client.StubClient{StatusErr: func() error { + if tt.expectedFail { + return fmt.Errorf(client.ConnectionErrorMsg) + } + + if counter < tt.retries { + counter++ + return fmt.Errorf(client.ConnectionErrorMsg) + } + + return nil + }} + + glClient := Gitlab{client: client, mu: &sync.RWMutex{}} + + glClient.poll(3*time.Millisecond, tt.maxTime) + if tt.expectedFail { + require.False(t, glClient.isReady) + + s := fmt.Sprintf("Failed to connect to the internal GitLab API after %.2fs", tt.maxTime.Seconds()) + require.Equal(t, s, hook.LastEntry().Message) + return + } + + require.True(t, glClient.isReady) + require.Equal(t, "GitLab internal pages status API connected successfully", hook.LastEntry().Message) + }) + } +} diff --git a/internal/source/gitlab/gitlab_test.go b/internal/source/gitlab/gitlab_test.go new file mode 100644 index 000000000..e6f194eee --- /dev/null +++ b/internal/source/gitlab/gitlab_test.go @@ -0,0 +1,100 @@ +package gitlab + +import ( + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/client" +) + +func TestGetDomain(t *testing.T) { + t.Run("when the response if correct", func(t *testing.T) { + client := client.StubClient{File: "client/testdata/test.gitlab.io.json"} + source := Gitlab{client: client} + + domain, err := source.GetDomain("test.gitlab.io") + require.NoError(t, err) + + require.Equal(t, "test.gitlab.io", domain.Name) + }) + + t.Run("when the response is not valid", func(t *testing.T) { + client := client.StubClient{File: "/dev/null"} + source := Gitlab{client: client} + + domain, err := source.GetDomain("test.gitlab.io") + + require.NotNil(t, err) + require.Nil(t, domain) + }) +} + +func TestResolve(t *testing.T) { + client := client.StubClient{File: "client/testdata/test.gitlab.io.json"} + source := Gitlab{client: client} + + t.Run("when requesting nested group project with root path", func(t *testing.T) { + target := "https://test.gitlab.io:443/my/pages/project/" + request := httptest.NewRequest("GET", target, nil) + + response, err := source.Resolve(request) + require.NoError(t, err) + + require.Equal(t, "/my/pages/project/", response.LookupPath.Prefix) + require.Equal(t, "some/path/to/project/", response.LookupPath.Path) + require.Equal(t, "", response.SubPath) + require.False(t, response.LookupPath.IsNamespaceProject) + }) + + t.Run("when requesting a nested group project with full path", func(t *testing.T) { + target := "https://test.gitlab.io:443/my/pages/project/path/index.html" + request := httptest.NewRequest("GET", target, nil) + + response, err := source.Resolve(request) + require.NoError(t, err) + + require.Equal(t, "/my/pages/project/", response.LookupPath.Prefix) + require.Equal(t, "some/path/to/project/", response.LookupPath.Path) + require.Equal(t, "path/index.html", response.SubPath) + require.False(t, response.LookupPath.IsNamespaceProject) + }) + + t.Run("when requesting the group root project with root path", func(t *testing.T) { + target := "https://test.gitlab.io:443/" + request := httptest.NewRequest("GET", target, nil) + + response, err := source.Resolve(request) + require.NoError(t, err) + + require.Equal(t, "/", response.LookupPath.Prefix) + require.Equal(t, "some/path/to/project-3/", response.LookupPath.Path) + require.Equal(t, "", response.SubPath) + require.True(t, response.LookupPath.IsNamespaceProject) + }) + + t.Run("when requesting the group root project with full path", func(t *testing.T) { + target := "https://test.gitlab.io:443/path/to/index.html" + request := httptest.NewRequest("GET", target, nil) + + response, err := source.Resolve(request) + require.NoError(t, err) + + require.Equal(t, "/", response.LookupPath.Prefix) + require.Equal(t, "path/to/index.html", response.SubPath) + require.Equal(t, "some/path/to/project-3/", response.LookupPath.Path) + require.True(t, response.LookupPath.IsNamespaceProject) + }) + + t.Run("when request path has not been sanitized", func(t *testing.T) { + target := "https://test.gitlab.io:443/something/../something/../my/pages/project/index.html" + request := httptest.NewRequest("GET", target, nil) + + response, err := source.Resolve(request) + require.NoError(t, err) + + require.Equal(t, "/my/pages/project/", response.LookupPath.Prefix) + require.Equal(t, "index.html", response.SubPath) + }) +} diff --git a/internal/source/source.go b/internal/source/source.go new file mode 100644 index 000000000..5540066c7 --- /dev/null +++ b/internal/source/source.go @@ -0,0 +1,9 @@ +package source + +import "gitlab.com/gitlab-org/gitlab-pages/internal/domain" + +// Source represents an abstract interface of a domains configuration source. +type Source interface { + GetDomain(string) (*domain.Domain, error) + IsReady() bool +} diff --git a/internal/source/source_mock.go b/internal/source/source_mock.go new file mode 100644 index 000000000..d7cd15333 --- /dev/null +++ b/internal/source/source_mock.go @@ -0,0 +1,36 @@ +package source + +import ( + "github.com/stretchr/testify/mock" + + "gitlab.com/gitlab-org/gitlab-pages/internal/domain" +) + +// MockSource can be used for testing +type MockSource struct { + mock.Mock +} + +// GetDomain is a mocked function +func (m *MockSource) GetDomain(name string) (*domain.Domain, error) { + args := m.Called(name) + err := args.Error(1) + + d, ok := args.Get(0).(*domain.Domain) + if !ok { + return nil, err + } + + return d, err +} + +func (m *MockSource) IsReady() bool { + args := m.Called() + + return args.Get(0).(bool) +} + +// NewMockSource returns a new Source mock for testing +func NewMockSource() *MockSource { + return &MockSource{} +} diff --git a/internal/testhelpers/chdir.go b/internal/testhelpers/chdir.go new file mode 100644 index 000000000..4015d8886 --- /dev/null +++ b/internal/testhelpers/chdir.go @@ -0,0 +1,29 @@ +package testhelpers + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func ChdirInPath(t testing.TB, path string, chdirSet *bool) func() { + t.Helper() + + if *chdirSet { + return func() {} + } + + cwd, err := os.Getwd() + require.NoError(t, err, "Cannot Getwd") + + require.NoError(t, os.Chdir(path), "Cannot Chdir") + + *chdirSet = true + return func() { + err := os.Chdir(cwd) + require.NoError(t, err, "Cannot Chdir in cleanup") + + *chdirSet = false + } +} diff --git a/internal/testhelpers/testhelpers.go b/internal/testhelpers/testhelpers.go new file mode 100644 index 000000000..422a3d9a6 --- /dev/null +++ b/internal/testhelpers/testhelpers.go @@ -0,0 +1,57 @@ +package testhelpers + +import ( + "mime" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +// AssertHTTP404 asserts handler returns 404 with provided str body +func AssertHTTP404(t *testing.T, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) { + w := httptest.NewRecorder() + req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) + require.NoError(t, err) + handler(w, req) + + require.Equal(t, http.StatusNotFound, w.Code, "HTTP status") + + if str != nil { + contentType, _, _ := mime.ParseMediaType(w.Header().Get("Content-Type")) + require.Equal(t, "text/html", contentType, "Content-Type") + require.Contains(t, w.Body.String(), str) + } +} + +// AssertRedirectTo asserts that handler redirects to particular URL +func AssertRedirectTo(t *testing.T, handler http.HandlerFunc, method string, + url string, values url.Values, expectedURL string) { + require.HTTPRedirect(t, handler, method, url, values) + + recorder := httptest.NewRecorder() + + req, _ := http.NewRequest(method, url, nil) + req.URL.RawQuery = values.Encode() + + handler(recorder, req) + + require.Equal(t, expectedURL, recorder.Header().Get("Location")) +} + +// AssertLogContains checks that wantLogEntry is contained in at least one of the log entries +func AssertLogContains(t *testing.T, wantLogEntry string, entries []*logrus.Entry) { + t.Helper() + + if wantLogEntry != "" { + messages := make([]string, len(entries)) + for k, entry := range entries { + messages[k] = entry.Message + } + + require.Contains(t, messages, wantLogEntry) + } +} diff --git a/internal/testhelpers/tmpdir.go b/internal/testhelpers/tmpdir.go new file mode 100644 index 000000000..81f0781bb --- /dev/null +++ b/internal/testhelpers/tmpdir.go @@ -0,0 +1,38 @@ +package testhelpers + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs/local" +) + +var fs = vfs.Instrumented(&local.VFS{}) + +func TmpDir(t *testing.T, pattern string) (vfs.Root, string, func()) { + tmpDir, err := ioutil.TempDir("", pattern) + if t != nil { + require.NoError(t, err) + } + + // On some systems `/tmp` can be a symlink + tmpDir, err = filepath.EvalSymlinks(tmpDir) + if t != nil { + require.NoError(t, err) + } + + root, err := fs.Root(context.Background(), tmpDir) + if t != nil { + require.NoError(t, err) + } + + return root, tmpDir, func() { + os.RemoveAll(tmpDir) + } +} diff --git a/internal/tlsconfig/tlsconfig.go b/internal/tlsconfig/tlsconfig.go new file mode 100644 index 000000000..9babf3744 --- /dev/null +++ b/internal/tlsconfig/tlsconfig.go @@ -0,0 +1,102 @@ +package tlsconfig + +import ( + "crypto/tls" + "fmt" + "sort" + "strings" +) + +// GetCertificateFunc returns the certificate to be used for given domain +type GetCertificateFunc func(*tls.ClientHelloInfo) (*tls.Certificate, error) + +var ( + preferredCipherSuites = []uint16{ + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + tls.TLS_CHACHA20_POLY1305_SHA256, + } + + // AllTLSVersions has all supported flag values + AllTLSVersions = map[string]uint16{ + "": 0, // Default value in tls.Config + "ssl3": tls.VersionSSL30, + "tls1.0": tls.VersionTLS10, + "tls1.1": tls.VersionTLS11, + "tls1.2": tls.VersionTLS12, + "tls1.3": tls.VersionTLS13, + } +) + +// FlagUsage returns string with explanation how to use the CLI flag +func FlagUsage(minOrMax string) string { + versions := []string{} + + for version := range AllTLSVersions { + if version != "" { + versions = append(versions, fmt.Sprintf("%q", version)) + } + } + sort.Strings(versions) + + return fmt.Sprintf("Specifies the "+minOrMax+"imum SSL/TLS version, supported values are %s", strings.Join(versions, ", ")) +} + +// Create returns tls.Config for given app configuration +func Create(cert, key []byte, getCertificate GetCertificateFunc, insecureCiphers bool, tlsMinVersion uint16, tlsMaxVersion uint16) (*tls.Config, error) { + tlsConfig := &tls.Config{GetCertificate: getCertificate} + + err := configureCertificate(tlsConfig, cert, key) + if err != nil { + return nil, err + } + + if !insecureCiphers { + configureTLSCiphers(tlsConfig) + } + + tlsConfig.MinVersion = tlsMinVersion + tlsConfig.MaxVersion = tlsMaxVersion + + return tlsConfig, nil +} + +// ValidateTLSVersions returns error if the provided TLS versions config values are not valid +func ValidateTLSVersions(min, max string) error { + tlsMin, tlsMinOk := AllTLSVersions[min] + tlsMax, tlsMaxOk := AllTLSVersions[max] + + if !tlsMinOk { + return fmt.Errorf("invalid minimum TLS version: %s", min) + } + if !tlsMaxOk { + return fmt.Errorf("invalid maximum TLS version: %s", max) + } + if tlsMin > tlsMax && tlsMax > 0 { + return fmt.Errorf("invalid maximum TLS version: %s; should be at least %s", max, min) + } + + return nil +} + +func configureCertificate(tlsConfig *tls.Config, cert, key []byte) error { + certificate, err := tls.X509KeyPair(cert, key) + if err != nil { + return err + } + + tlsConfig.Certificates = []tls.Certificate{certificate} + + return nil +} + +func configureTLSCiphers(tlsConfig *tls.Config) { + tlsConfig.PreferServerCipherSuites = true + tlsConfig.CipherSuites = preferredCipherSuites +} diff --git a/internal/tlsconfig/tlsconfig_test.go b/internal/tlsconfig/tlsconfig_test.go new file mode 100644 index 000000000..00a080667 --- /dev/null +++ b/internal/tlsconfig/tlsconfig_test.go @@ -0,0 +1,71 @@ +package tlsconfig + +import ( + "crypto/tls" + "testing" + + "github.com/stretchr/testify/require" +) + +var cert = []byte(`-----BEGIN CERTIFICATE----- +MIIBhTCCASugAwIBAgIQIRi6zePL6mKjOipn+dNuaTAKBggqhkjOPQQDAjASMRAw +DgYDVQQKEwdBY21lIENvMB4XDTE3MTAyMDE5NDMwNloXDTE4MTAyMDE5NDMwNlow +EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0d +7VNhbWvZLWPuj/RtHFjvtJBEwOkhbN/BnnE8rnZR8+sbwnc/KhCk3FhnpHZnQz7B +5aETbbIgmuvewdjvSBSjYzBhMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr +BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdEQQiMCCCDmxvY2FsaG9zdDo1 +NDUzgg4xMjcuMC4wLjE6NTQ1MzAKBggqhkjOPQQDAgNIADBFAiEA2zpJEPQyz6/l +Wf86aX6PepsntZv2GYlA5UpabfT2EZICICpJ5h/iI+i341gBmLiAFQOyTDT+/wQc +6MF9+Yw1Yy0t +-----END CERTIFICATE-----`) + +var key = []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIIrYSSNQFaA2Hwf1duRSxKtLYX5CB04fSeQ6tF1aY/PuoAoGCCqGSM49 +AwEHoUQDQgAEPR3tU2Fta9ktY+6P9G0cWO+0kETA6SFs38GecTyudlHz6xvCdz8q +EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== +-----END EC PRIVATE KEY-----`) + +var getCertificate = func(ch *tls.ClientHelloInfo) (*tls.Certificate, error) { + return nil, nil +} + +func TestValidateTLSVersions(t *testing.T) { + tests := map[string]struct { + tlsMin string + tlsMax string + err string + }{ + "invalid minimum TLS version": {tlsMin: "tls123", tlsMax: "", err: "invalid minimum TLS version: tls123"}, + "invalid maximum TLS version": {tlsMin: "", tlsMax: "tls123", err: "invalid maximum TLS version: tls123"}, + "TLS versions conflict": {tlsMin: "tls1.2", tlsMax: "tls1.1", err: "invalid maximum TLS version: tls1.1; should be at least tls1.2"}, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + err := ValidateTLSVersions(tc.tlsMin, tc.tlsMax) + require.EqualError(t, err, tc.err) + }) + } +} + +func TestInvalidKeyPair(t *testing.T) { + _, err := Create([]byte(``), []byte(``), getCertificate, false, tls.VersionTLS11, tls.VersionTLS12) + require.EqualError(t, err, "tls: failed to find any PEM data in certificate input") +} + +func TestInsecureCiphers(t *testing.T) { + tlsConfig, err := Create(cert, key, getCertificate, true, tls.VersionTLS11, tls.VersionTLS12) + require.NoError(t, err) + require.False(t, tlsConfig.PreferServerCipherSuites) + require.Empty(t, tlsConfig.CipherSuites) +} + +func TestCreate(t *testing.T) { + tlsConfig, err := Create(cert, key, getCertificate, false, tls.VersionTLS11, tls.VersionTLS12) + require.NoError(t, err) + require.IsType(t, getCertificate, tlsConfig.GetCertificate) + require.True(t, tlsConfig.PreferServerCipherSuites) + require.Equal(t, preferredCipherSuites, tlsConfig.CipherSuites) + require.Equal(t, uint16(tls.VersionTLS11), tlsConfig.MinVersion) + require.Equal(t, uint16(tls.VersionTLS12), tlsConfig.MaxVersion) +} diff --git a/internal/validateargs/validateargs.go b/internal/validateargs/validateargs.go new file mode 100644 index 000000000..3b75b69bf --- /dev/null +++ b/internal/validateargs/validateargs.go @@ -0,0 +1,41 @@ +package validateargs + +import ( + "fmt" + "strings" +) + +const ( + deprecatedMessage = "command line options have been deprecated:" + notAllowedMsg = "invalid command line arguments:" +) + +var deprecatedArgs = []string{"-sentry-dsn"} +var notAllowedArgs = []string{"-auth-client-id", "-auth-client-secret", "-auth-secret"} + +// Deprecated checks if deprecated params have been used +func Deprecated(args []string) error { + return validate(args, deprecatedArgs, deprecatedMessage) +} + +// NotAllowed checks if explicitly not allowed params have been used +func NotAllowed(args []string) error { + return validate(args, notAllowedArgs, notAllowedMsg) +} + +func validate(args, invalidArgs []string, errMsg string) error { + var foundInvalidArgs []string + + argsStr := strings.Join(args, " ") + for _, invalidArg := range invalidArgs { + if strings.Contains(argsStr, invalidArg) { + foundInvalidArgs = append(foundInvalidArgs, invalidArg) + } + } + + if len(foundInvalidArgs) > 0 { + return fmt.Errorf("%s %s", errMsg, strings.Join(foundInvalidArgs, ", ")) + } + + return nil +} diff --git a/internal/validateargs/validateargs_test.go b/internal/validateargs/validateargs_test.go new file mode 100644 index 000000000..7bf2ec105 --- /dev/null +++ b/internal/validateargs/validateargs_test.go @@ -0,0 +1,50 @@ +package validateargs + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValidParams(t *testing.T) { + args := []string{"gitlab-pages", + "-listen-http", ":3010", + "-artifacts-server", "http://192.168.1.123:3000/api/v4", + "-pages-domain", "127.0.0.1.xip.io"} + require.NoError(t, Deprecated(args)) + require.NoError(t, NotAllowed(args)) +} + +func TestInvalidDeprecatedParms(t *testing.T) { + tests := map[string][]string{ + "Sentry DSN passed": []string{"gitlab-pages", "-sentry-dsn", "abc123"}, + "Sentry DSN using key=value": []string{"gitlab-pages", "-sentry-dsn=abc123"}, + } + + for name, args := range tests { + t.Run(name, func(t *testing.T) { + err := Deprecated(args) + require.Error(t, err) + require.Contains(t, err.Error(), deprecatedMessage) + }) + } +} + +func TestInvalidNotAllowedParams(t *testing.T) { + tests := map[string][]string{ + "Client ID passed": []string{"gitlab-pages", "-auth-client-id", "abc123"}, + "Client secret passed": []string{"gitlab-pages", "-auth-client-secret", "abc123"}, + "Auth secret passed": []string{"gitlab-pages", "-auth-secret", "abc123"}, + "Multiple keys passed": []string{"gitlab-pages", "-auth-client-id", "abc123", "-auth-client-secret", "abc123"}, + "key=value": []string{"gitlab-pages", "-auth-client-id=abc123"}, + "multiple key=value": []string{"gitlab-pages", "-auth-client-id=abc123", "-auth-client-secret=abc123"}, + } + + for name, args := range tests { + t.Run(name, func(t *testing.T) { + err := NotAllowed(args) + require.Error(t, err) + require.Contains(t, err.Error(), notAllowedMsg) + }) + } +} diff --git a/internal/vfs/errors.go b/internal/vfs/errors.go new file mode 100644 index 000000000..32b861925 --- /dev/null +++ b/internal/vfs/errors.go @@ -0,0 +1,18 @@ +package vfs + +import ( + "fmt" +) + +type ErrNotExist struct { + Inner error +} + +func (e ErrNotExist) Error() string { + return fmt.Sprintf("not exist: %q", e.Inner) +} + +func IsNotExist(err error) bool { + _, ok := err.(*ErrNotExist) + return ok +} diff --git a/internal/vfs/file.go b/internal/vfs/file.go new file mode 100644 index 000000000..47f683082 --- /dev/null +++ b/internal/vfs/file.go @@ -0,0 +1,15 @@ +package vfs + +import "io" + +// File represents an open file, which will typically be the response body of a Pages request. +type File interface { + io.Reader + io.Closer +} + +// SeekableFile represents a seekable file, which will typically be the response body of a Pages request. +type SeekableFile interface { + File + io.Seeker +} diff --git a/internal/vfs/local/root.go b/internal/vfs/local/root.go new file mode 100644 index 000000000..9cd67a9f0 --- /dev/null +++ b/internal/vfs/local/root.go @@ -0,0 +1,107 @@ +package local + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" + + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +var errNotFile = errors.New("path needs to be a file") + +type invalidPathError struct { + rootPath string + requestedPath string +} + +func (i *invalidPathError) Error() string { + return fmt.Sprintf("%q should be in %q", i.requestedPath, i.rootPath) +} + +type Root struct { + rootPath string +} + +func (r *Root) validatePath(path string) (string, string, error) { + fullPath := filepath.Join(r.rootPath, path) + + if r.rootPath == fullPath { + return fullPath, "", nil + } + + vfsPath := strings.TrimPrefix(fullPath, r.rootPath+"/") + + // The requested path resolved to somewhere outside of the `r.rootPath` directory + if fullPath == vfsPath { + return "", "", &invalidPathError{rootPath: r.rootPath, requestedPath: fullPath} + } + + return fullPath, vfsPath, nil +} + +func (r *Root) Lstat(ctx context.Context, name string) (os.FileInfo, error) { + fullPath, _, err := r.validatePath(name) + if err != nil { + return nil, err + } + + return os.Lstat(fullPath) +} + +func (r *Root) Readlink(ctx context.Context, name string) (string, error) { + fullPath, _, err := r.validatePath(name) + if err != nil { + return "", err + } + + target, err := os.Readlink(fullPath) + if err != nil { + return "", err + } + + // If `target` is local to `rootPath` return relative + if strings.HasPrefix(target, r.rootPath+"/") { + return filepath.Rel(filepath.Dir(fullPath), target) + } + + // If `target` is absolute return as-is making `EvalSymlinks` + // to discover misuse of a root path + if filepath.IsAbs(target) { + return target, nil + } + + // If `target` is relative, return as-is + return target, nil +} + +func (r *Root) Open(ctx context.Context, name string) (vfs.File, error) { + fullPath, _, err := r.validatePath(name) + if err != nil { + return nil, err + } + + file, err := os.OpenFile(fullPath, os.O_RDONLY|unix.O_NOFOLLOW, 0) + if err != nil { + return nil, err + } + + // We do a `Stat()` on a file due to race-conditions + // Someone could update (unlikely) a file between `Stat()/Open()` + fi, err := file.Stat() + if err != nil { + return nil, err + } + + if !fi.Mode().IsRegular() { + file.Close() + return nil, errNotFile + } + + return file, nil +} diff --git a/internal/vfs/local/root_test.go b/internal/vfs/local/root_test.go new file mode 100644 index 000000000..f89e7736d --- /dev/null +++ b/internal/vfs/local/root_test.go @@ -0,0 +1,296 @@ +package local + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidatePath(t *testing.T) { + ctx := context.Background() + rootVFS, err := localVFS.Root(ctx, ".") + require.NoError(t, err) + + root := rootVFS.(*Root) + + wd, err := os.Getwd() + require.NoError(t, err) + + tests := map[string]struct { + path string + expectedFullPath string + expectedVFSPath string + expectedInvalidPath bool + }{ + "a valid path": { + path: "testdata/link", + expectedFullPath: filepath.Join(wd, "testdata", "link"), + expectedVFSPath: filepath.Join("testdata", "link"), + }, + "a path outside of root directory": { + path: "testdata/../../link", + expectedInvalidPath: true, + }, + "an absolute path": { + // we don't support absolute paths, thus the `wd` will be preprended to `path` + path: filepath.Join(wd, "testdata", "link"), + expectedFullPath: filepath.Join(wd, wd, "testdata", "link"), + expectedVFSPath: filepath.Join(wd, "testdata", "link")[1:], // strip leading `/` + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + fullPath, vfsPath, err := root.validatePath(test.path) + + if test.expectedInvalidPath { + require.IsType(t, &invalidPathError{}, err, "InvalidPath") + return + } + + require.NoError(t, err, "validatePath") + assert.Equal(t, test.expectedFullPath, fullPath, "FullPath") + assert.Equal(t, test.expectedVFSPath, vfsPath, "VFSPath") + }) + } +} + +func TestReadlink(t *testing.T) { + ctx := context.Background() + root, err := localVFS.Root(ctx, ".") + require.NoError(t, err) + + tests := map[string]struct { + path string + expectedTarget string + expectedErr string + expectedInvalidPath bool + expectedIsNotExist bool + }{ + "a valid link": { + path: "testdata/link", + expectedTarget: "file", + }, + "a file": { + path: "testdata/file", + expectedErr: "invalid argument", + }, + "a path outside of root directory": { + path: "testdata/../../link", + expectedInvalidPath: true, + }, + "a non-existing link": { + path: "non-existing", + expectedIsNotExist: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + target, err := root.Readlink(ctx, test.path) + + if test.expectedIsNotExist { + require.Equal(t, test.expectedIsNotExist, os.IsNotExist(err), "IsNotExist") + return + } + + if test.expectedInvalidPath { + require.IsType(t, &invalidPathError{}, err, "InvalidPath") + return + } + + if test.expectedErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), test.expectedErr, "Readlink") + return + } + + require.NoError(t, err, "Readlink") + assert.Equal(t, test.expectedTarget, target, "target") + }) + } +} + +func TestReadlinkAbsolutePath(t *testing.T) { + // create structure as: + // /tmp/dir: directory + // /tmp/dir/symlink: points to `/tmp/file` outside of the `/tmp/dir` + // /tmp/dir/symlink2: points to `/tmp/dir/file` + tmpDir, cleanup := tmpDir(t) + defer cleanup() + + dirPath := filepath.Join(tmpDir, "dir") + err := os.Mkdir(dirPath, 0755) + require.NoError(t, err) + + symlinkPath := filepath.Join(dirPath, "symlink") + filePath := filepath.Join(tmpDir, "file") + err = os.Symlink(filePath, symlinkPath) + require.NoError(t, err) + + symlinkPath = filepath.Join(dirPath, "symlink2") + dirFilePath := filepath.Join(dirPath, "file") + err = os.Symlink(dirFilePath, symlinkPath) + require.NoError(t, err) + + root, err := localVFS.Root(context.Background(), dirPath) + require.NoError(t, err) + + tests := map[string]struct { + path string + expectedTarget string + }{ + "the absolute path is returned for file outside of `/tmp/dir": { + path: "symlink", + expectedTarget: filePath, + }, + "the relative path is returned for file inside the `/tmp/dir": { + path: "symlink2", + expectedTarget: "file", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + targetPath, err := root.Readlink(context.Background(), test.path) + require.NoError(t, err) + + assert.Equal(t, test.expectedTarget, targetPath) + }) + } +} + +func TestLstat(t *testing.T) { + ctx := context.Background() + root, err := localVFS.Root(ctx, ".") + require.NoError(t, err) + + tests := map[string]struct { + path string + modePerm os.FileMode + modeType os.FileMode + expectedInvalidPath bool + expectedIsNotExist bool + }{ + "a directory": { + path: "testdata", + modeType: os.ModeDir, + modePerm: 0755, + }, + "a file": { + path: "testdata/file", + modeType: os.FileMode(0), + modePerm: 0644, + }, + "a link": { + path: "testdata/link", + modeType: os.ModeSymlink, + // modePerm: Permissions of symlinks are platform dependent + }, + "a path outside of root directory": { + path: "testdata/../../link", + expectedInvalidPath: true, + }, + "a non-existing link": { + path: "non-existing", + expectedIsNotExist: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.modePerm > 0 { + require.NoError(t, os.Chmod(test.path, test.modePerm), "preparation: deterministic permissions") + } + + fi, err := root.Lstat(ctx, test.path) + + if test.expectedIsNotExist { + require.Equal(t, test.expectedIsNotExist, os.IsNotExist(err), "IsNotExist") + return + } + + if test.expectedInvalidPath { + require.IsType(t, &invalidPathError{}, err, "InvalidPath") + return + } + + require.NoError(t, err, "Lstat") + require.Equal(t, test.modeType, fi.Mode()&os.ModeType, "file mode: type") + if test.modePerm > 0 { + require.Equal(t, test.modePerm, fi.Mode()&os.ModePerm, "file mode: permissions") + } + }) + } +} + +func TestOpen(t *testing.T) { + ctx := context.Background() + root, err := localVFS.Root(ctx, ".") + require.NoError(t, err) + + tests := map[string]struct { + path string + expectedInvalidPath bool + expectedIsNotExist bool + expectedContent string + expectedErr string + }{ + "a file": { + path: "testdata/file", + expectedContent: "hello\n", + }, + "a directory": { + path: "testdata", + expectedErr: errNotFile.Error(), + }, + "a link": { + path: "testdata/link", + expectedErr: "too many levels of symbolic links", + }, + "a path outside of root directory": { + path: "testdata/../../link", + expectedInvalidPath: true, + }, + "a non-existing file": { + path: "non-existing", + expectedIsNotExist: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + file, err := root.Open(ctx, test.path) + if file != nil { + defer file.Close() + } + + if test.expectedIsNotExist { + require.Equal(t, test.expectedIsNotExist, os.IsNotExist(err), "IsNotExist") + return + } + + if test.expectedErr != "" { + require.Error(t, err, "Open") + require.Contains(t, err.Error(), test.expectedErr, "Open") + return + } + + if test.expectedInvalidPath { + require.IsType(t, &invalidPathError{}, err, "InvalidPath") + return + } + + require.NoError(t, err, "Open") + + data, err := ioutil.ReadAll(file) + require.NoError(t, err, "ReadAll") + require.Equal(t, test.expectedContent, string(data), "ReadAll") + }) + } +} diff --git a/internal/vfs/local/testdata/file b/internal/vfs/local/testdata/file new file mode 100644 index 000000000..ce0136250 --- /dev/null +++ b/internal/vfs/local/testdata/file @@ -0,0 +1 @@ +hello diff --git a/internal/vfs/local/testdata/link b/internal/vfs/local/testdata/link new file mode 120000 index 000000000..1a010b1c0 --- /dev/null +++ b/internal/vfs/local/testdata/link @@ -0,0 +1 @@ +file \ No newline at end of file diff --git a/internal/vfs/local/vfs.go b/internal/vfs/local/vfs.go new file mode 100644 index 000000000..ea54e8e83 --- /dev/null +++ b/internal/vfs/local/vfs.go @@ -0,0 +1,51 @@ +package local + +import ( + "context" + "errors" + "os" + "path/filepath" + + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +var errNotDirectory = errors.New("path needs to be a directory") + +type VFS struct{} + +func (fs VFS) Root(ctx context.Context, path string) (vfs.Root, error) { + rootPath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + rootPath, err = filepath.EvalSymlinks(rootPath) + if os.IsNotExist(err) { + return nil, &vfs.ErrNotExist{Inner: err} + } else if err != nil { + return nil, err + } + + fi, err := os.Lstat(rootPath) + if os.IsNotExist(err) { + return nil, &vfs.ErrNotExist{Inner: err} + } else if err != nil { + return nil, err + } + + if !fi.Mode().IsDir() { + return nil, errNotDirectory + } + + return &Root{rootPath: rootPath}, nil +} + +func (fs *VFS) Name() string { + return "local" +} + +func (fs *VFS) Reconfigure(*config.Config) error { + // noop + return nil +} diff --git a/internal/vfs/local/vfs_test.go b/internal/vfs/local/vfs_test.go new file mode 100644 index 000000000..b678cfa7a --- /dev/null +++ b/internal/vfs/local/vfs_test.go @@ -0,0 +1,117 @@ +package local + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" +) + +var localVFS = &VFS{} + +func tmpDir(t *testing.T) (string, func()) { + tmpDir, err := ioutil.TempDir("", "vfs") + require.NoError(t, err) + + // On some systems `/tmp` can be a symlink + tmpDir, err = filepath.EvalSymlinks(tmpDir) + require.NoError(t, err) + + return tmpDir, func() { + os.RemoveAll(tmpDir) + } +} + +func TestVFSRoot(t *testing.T) { + // create structure as: + // /tmp/dir: directory + // /tmp/dir_link: symlink to `dir` + // /tmp/dir_absolute_link: symlink to `/tmp/dir` + // /tmp/file: file + // /tmp/file_link: symlink to `file` + // /tmp/file_absolute_link: symlink to `/tmp/file` + tmpDir, cleanup := tmpDir(t) + defer cleanup() + + dirPath := filepath.Join(tmpDir, "dir") + err := os.Mkdir(dirPath, 0755) + require.NoError(t, err) + + filePath := filepath.Join(tmpDir, "file") + err = ioutil.WriteFile(filePath, []byte{}, 0644) + require.NoError(t, err) + + symlinks := map[string]string{ + "dir_link": "dir", + "dir_absolute_link": dirPath, + "file_link": "file", + "file_absolute_link": filePath, + } + + for dest, src := range symlinks { + err := os.Symlink(src, filepath.Join(tmpDir, dest)) + require.NoError(t, err) + } + + tests := map[string]struct { + path string + expectedPath string + expectedErr error + expectedIsNotExist bool + }{ + "a valid directory": { + path: "dir", + expectedPath: dirPath, + }, + "a symlink to directory": { + path: "dir_link", + expectedPath: dirPath, + }, + "a symlink to absolute directory": { + path: "dir_absolute_link", + expectedPath: dirPath, + }, + "a file": { + path: "file", + expectedErr: errNotDirectory, + }, + "a symlink to file": { + path: "file_link", + expectedErr: errNotDirectory, + }, + "a symlink to absolute file": { + path: "file_absolute_link", + expectedErr: errNotDirectory, + }, + "a non-existing file": { + path: "not-existing", + expectedIsNotExist: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rootVFS, err := localVFS.Root(context.Background(), filepath.Join(tmpDir, test.path)) + + if test.expectedIsNotExist { + require.Equal(t, test.expectedIsNotExist, vfs.IsNotExist(err)) + return + } + + if test.expectedErr != nil { + require.EqualError(t, err, test.expectedErr.Error()) + return + } + + require.NoError(t, err) + require.IsType(t, &Root{}, rootVFS) + assert.Equal(t, test.expectedPath, rootVFS.(*Root).rootPath) + }) + } +} diff --git a/internal/vfs/root.go b/internal/vfs/root.go new file mode 100644 index 000000000..30d97b0b7 --- /dev/null +++ b/internal/vfs/root.go @@ -0,0 +1,69 @@ +package vfs + +import ( + "context" + "os" + "strconv" + + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// Root abstracts the things Pages needs to serve a static site from a given root rootPath. +type Root interface { + Lstat(ctx context.Context, name string) (os.FileInfo, error) + Readlink(ctx context.Context, name string) (string, error) + Open(ctx context.Context, name string) (File, error) +} + +type instrumentedRoot struct { + root Root + name string + rootPath string +} + +func (i *instrumentedRoot) increment(operation string, err error) { + metrics.VFSOperations.WithLabelValues(i.name, operation, strconv.FormatBool(err == nil)).Inc() +} + +func (i *instrumentedRoot) log() *log.Entry { + return log.WithField("vfs", i.name).WithField("root-path", i.rootPath) +} + +func (i *instrumentedRoot) Lstat(ctx context.Context, name string) (os.FileInfo, error) { + fi, err := i.root.Lstat(ctx, name) + + i.increment("Lstat", err) + i.log(). + WithField("name", name). + WithError(err). + Traceln("Lstat call") + + return fi, err +} + +func (i *instrumentedRoot) Readlink(ctx context.Context, name string) (string, error) { + target, err := i.root.Readlink(ctx, name) + + i.increment("Readlink", err) + i.log(). + WithField("name", name). + WithField("ret-target", target). + WithError(err). + Traceln("Readlink call") + + return target, err +} + +func (i *instrumentedRoot) Open(ctx context.Context, name string) (File, error) { + f, err := i.root.Open(ctx, name) + + i.increment("Open", err) + i.log(). + WithField("name", name). + WithError(err). + Traceln("Open call") + + return f, err +} diff --git a/internal/vfs/vfs.go b/internal/vfs/vfs.go new file mode 100644 index 000000000..2304f9034 --- /dev/null +++ b/internal/vfs/vfs.go @@ -0,0 +1,58 @@ +package vfs + +import ( + "context" + "strconv" + + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// VFS abstracts the things Pages needs to serve a static site from disk. +type VFS interface { + Root(ctx context.Context, path string) (Root, error) + Name() string + Reconfigure(config *config.Config) error +} + +func Instrumented(fs VFS) VFS { + return &instrumentedVFS{fs: fs} +} + +type instrumentedVFS struct { + fs VFS +} + +func (i *instrumentedVFS) increment(operation string, err error) { + metrics.VFSOperations.WithLabelValues(i.fs.Name(), operation, strconv.FormatBool(err == nil)).Inc() +} + +func (i *instrumentedVFS) log() *log.Entry { + return log.WithField("vfs", i.fs.Name()) +} + +func (i *instrumentedVFS) Root(ctx context.Context, path string) (Root, error) { + root, err := i.fs.Root(ctx, path) + + i.increment("Root", err) + i.log(). + WithField("path", path). + WithError(err). + Traceln("Root call") + + if err != nil { + return nil, err + } + + return &instrumentedRoot{root: root, name: i.fs.Name(), rootPath: path}, nil +} + +func (i *instrumentedVFS) Name() string { + return i.fs.Name() +} + +func (i *instrumentedVFS) Reconfigure(cfg *config.Config) error { + return i.fs.Reconfigure(cfg) +} diff --git a/internal/vfs/zip/archive.go b/internal/vfs/zip/archive.go new file mode 100644 index 000000000..9826cdd63 --- /dev/null +++ b/internal/vfs/zip/archive.go @@ -0,0 +1,308 @@ +package zip + +import ( + "archive/zip" + "context" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httprange" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +const ( + dirPrefix = "public/" + maxSymlinkSize = 256 +) + +var ( + errNotSymlink = errors.New("not a symlink") + errSymlinkSize = errors.New("symlink too long") + errNotFile = errors.New("not a file") +) + +type archiveStatus int + +const ( + archiveOpening archiveStatus = iota + archiveOpenError + archiveOpened + archiveCorrupted +) + +// zipArchive implements the vfs.Root interface. +// It represents a zip archive saving all its files in memory. +// It holds an httprange.Resource that can be read with httprange.RangedReader in chunks. +type zipArchive struct { + fs *zipVFS + + once sync.Once + done chan struct{} + openTimeout time.Duration + + cacheNamespace string + + resource *httprange.Resource + reader *httprange.RangedReader + archive *zip.Reader + err error + + files map[string]*zip.File + directories map[string]*zip.FileHeader +} + +func newArchive(fs *zipVFS, openTimeout time.Duration) *zipArchive { + return &zipArchive{ + fs: fs, + done: make(chan struct{}), + files: make(map[string]*zip.File), + directories: make(map[string]*zip.FileHeader), + openTimeout: openTimeout, + cacheNamespace: strconv.FormatInt(atomic.AddInt64(&fs.archiveCount, 1), 10) + ":", + } +} + +func (a *zipArchive) openArchive(parentCtx context.Context, url string) (err error) { + // always try to update URL on resource + if a.resource != nil { + a.resource.SetURL(url) + } + + // return early if openArchive was done already in a concurrent request + if status, err := a.openStatus(); status != archiveOpening { + return err + } + + ctx, cancel := context.WithTimeout(parentCtx, a.openTimeout) + defer cancel() + + a.once.Do(func() { + // read archive once in its own routine with its own timeout + // if parentCtx is canceled, readArchive will continue regardless and will be cached in memory + go a.readArchive(url) + }) + + // wait for readArchive to be done or return if the parent context is canceled + select { + case <-a.done: + return a.err + case <-ctx.Done(): + err := ctx.Err() + switch err { + case context.Canceled: + log.WithError(err).Traceln("open zip archive request canceled") + case context.DeadlineExceeded: + log.WithError(err).Traceln("open zip archive timed out") + } + + return err + } +} + +// readArchive creates an httprange.Resource that can read the archive's contents and stores a slice of *zip.Files +// that can be accessed later when calling any of th vfs.VFS operations +func (a *zipArchive) readArchive(url string) { + defer close(a.done) + + // readArchive with a timeout separate from openArchive's + ctx, cancel := context.WithTimeout(context.Background(), a.openTimeout) + defer cancel() + + a.resource, a.err = httprange.NewResource(ctx, url) + if a.err != nil { + metrics.ZipOpened.WithLabelValues("error").Inc() + return + } + + // load all archive files into memory using a cached ranged reader + a.reader = httprange.NewRangedReader(a.resource) + a.reader.WithCachedReader(ctx, func() { + a.archive, a.err = zip.NewReader(a.reader, a.resource.Size) + }) + + if a.archive == nil || a.err != nil { + metrics.ZipOpened.WithLabelValues("error").Inc() + return + } + + // TODO: Improve preprocessing of zip archives https://gitlab.com/gitlab-org/gitlab-pages/-/issues/432 + for _, file := range a.archive.File { + if !strings.HasPrefix(file.Name, dirPrefix) { + continue + } + + if file.Mode().IsDir() { + a.directories[file.Name] = &file.FileHeader + } else { + a.files[file.Name] = file + } + + a.addPathDirectory(file.Name) + } + + // recycle memory + a.archive.File = nil + + fileCount := float64(len(a.files)) + metrics.ZipOpened.WithLabelValues("ok").Inc() + metrics.ZipOpenedEntriesCount.Add(fileCount) + metrics.ZipArchiveEntriesCached.Add(fileCount) +} + +// addPathDirectory adds a directory for a given path +func (a *zipArchive) addPathDirectory(pathname string) { + // Split dir and file from `path` + pathname, _ = path.Split(pathname) + if pathname == "" { + return + } + + if a.directories[pathname] != nil { + return + } + + a.directories[pathname] = &zip.FileHeader{ + Name: pathname, + } +} + +func (a *zipArchive) findFile(name string) *zip.File { + name = path.Clean(dirPrefix + name) + + return a.files[name] +} + +func (a *zipArchive) findDirectory(name string) *zip.FileHeader { + name = path.Clean(dirPrefix + name) + + return a.directories[name+"/"] +} + +// Open finds the file by name inside the zipArchive and returns a reader that can be served by the VFS +func (a *zipArchive) Open(ctx context.Context, name string) (vfs.File, error) { + file := a.findFile(name) + if file == nil { + if a.findDirectory(name) != nil { + return nil, errNotFile + } + return nil, os.ErrNotExist + } + + if !file.Mode().IsRegular() { + return nil, errNotFile + } + + dataOffset, err := a.fs.dataOffsetCache.findOrFetch(a.cacheNamespace, name, func() (interface{}, error) { + return file.DataOffset() + }) + if err != nil { + return nil, err + } + + // only read from dataOffset up to the size of the compressed file + reader := a.reader.SectionReader(ctx, dataOffset.(int64), int64(file.CompressedSize64)) + + switch file.Method { + case zip.Deflate: + return newDeflateReader(reader), nil + case zip.Store: + return reader, nil + default: + return nil, fmt.Errorf("unsupported compression method: %x", file.Method) + } +} + +// Lstat finds the file by name inside the zipArchive and returns its FileInfo +func (a *zipArchive) Lstat(ctx context.Context, name string) (os.FileInfo, error) { + file := a.findFile(name) + if file != nil { + return file.FileInfo(), nil + } + + directory := a.findDirectory(name) + if directory != nil { + return directory.FileInfo(), nil + } + + return nil, os.ErrNotExist +} + +// ReadLink finds the file by name inside the zipArchive and returns the contents of the symlink +func (a *zipArchive) Readlink(ctx context.Context, name string) (string, error) { + file := a.findFile(name) + if file == nil { + if a.findDirectory(name) != nil { + return "", errNotSymlink + } + return "", os.ErrNotExist + } + + if file.FileInfo().Mode()&os.ModeSymlink != os.ModeSymlink { + return "", errNotSymlink + } + + symlinkValue, err := a.fs.readlinkCache.findOrFetch(a.cacheNamespace, name, func() (interface{}, error) { + rc, err := file.Open() + if err != nil { + return nil, err + } + defer rc.Close() + + var link [maxSymlinkSize + 1]byte + + // read up to len(symlink) bytes from the link file + n, err := io.ReadFull(rc, link[:]) + if err != nil && err != io.ErrUnexpectedEOF { + // if err == io.ErrUnexpectedEOF the link is smaller than len(symlink) so it's OK to not return it + return nil, err + } + + return string(link[:n]), nil + }) + if err != nil { + return "", err + } + + symlink := symlinkValue.(string) + + // return errSymlinkSize if the number of bytes read from the link is too big + if len(symlink) > maxSymlinkSize { + return "", errSymlinkSize + } + + return symlink, nil +} + +// onEvicted called by the zipVFS.cache when an archive is removed from the cache +func (a *zipArchive) onEvicted() { + metrics.ZipArchiveEntriesCached.Sub(float64(len(a.files))) +} + +func (a *zipArchive) openStatus() (archiveStatus, error) { + select { + case <-a.done: + if a.err != nil { + return archiveOpenError, a.err + } + + if a.resource != nil && a.resource.Err() != nil { + return archiveCorrupted, a.resource.Err() + } + + return archiveOpened, nil + + default: + return archiveOpening, nil + } +} diff --git a/internal/vfs/zip/archive_test.go b/internal/vfs/zip/archive_test.go new file mode 100644 index 000000000..58b7c74ab --- /dev/null +++ b/internal/vfs/zip/archive_test.go @@ -0,0 +1,474 @@ +package zip + +import ( + "archive/zip" + "bytes" + "context" + "crypto/rand" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strconv" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/httprange" + "gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers" +) + +var ( + chdirSet = false + zipCfg = &config.ZipServing{ + ExpirationInterval: 10 * time.Second, + CleanupInterval: 5 * time.Second, + RefreshInterval: 5 * time.Second, + OpenTimeout: 5 * time.Second, + } +) + +func TestOpen(t *testing.T) { + zip, cleanup := openZipArchive(t, nil) + defer cleanup() + + tests := map[string]struct { + file string + expectedContent string + expectedErr error + }{ + "file_exists": { + file: "index.html", + expectedContent: "zip.gitlab.io/project/index.html\n", + expectedErr: nil, + }, + "file_exists_in_subdir": { + file: "subdir/hello.html", + expectedContent: "zip.gitlab.io/project/subdir/hello.html\n", + expectedErr: nil, + }, + "file_exists_symlink": { + file: "symlink.html", + expectedContent: "subdir/linked.html", + expectedErr: errNotFile, + }, + "is_dir": { + file: "subdir", + expectedErr: errNotFile, + }, + "file_does_not_exist": { + file: "unknown.html", + expectedErr: os.ErrNotExist, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + f, err := zip.Open(context.Background(), tt.file) + if tt.expectedErr != nil { + require.EqualError(t, err, tt.expectedErr.Error()) + return + } + + require.NoError(t, err) + data, err := ioutil.ReadAll(f) + require.NoError(t, err) + + require.Equal(t, tt.expectedContent, string(data)) + require.NoError(t, f.Close()) + }) + } +} + +func TestOpenCached(t *testing.T) { + var requests int64 + testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public-without-dirs.zip", &requests) + defer cleanup() + + fs := New(zipCfg) + + // We use array instead of map to ensure + // predictable ordering of test execution + tests := []struct { + name string + vfsPath string + filePath string + expectedArchiveStatus archiveStatus + expectedOpenErr error + expectedReadErr error + expectedRequests int64 + }{ + { + name: "open file first time", + vfsPath: testServerURL + "/public.zip", + filePath: "index.html", + // we expect five requests to: + // read resource and zip metadata + // read file: data offset and content + expectedRequests: 5, + expectedArchiveStatus: archiveOpened, + }, + { + name: "open file second time", + vfsPath: testServerURL + "/public.zip", + filePath: "index.html", + // we expect one request to read file with cached data offset + expectedRequests: 1, + expectedArchiveStatus: archiveOpened, + }, + { + name: "when the URL changes", + vfsPath: testServerURL + "/public.zip?new-secret", + filePath: "index.html", + expectedRequests: 1, + expectedArchiveStatus: archiveOpened, + }, + { + name: "when opening cached file and content changes", + vfsPath: testServerURL + "/public.zip?changed-content=1", + filePath: "index.html", + expectedRequests: 1, + // we receive an error on `read` as `open` offset is already cached + expectedReadErr: httprange.ErrRangeRequestsNotSupported, + expectedArchiveStatus: archiveCorrupted, + }, + { + name: "after content change archive is reloaded", + vfsPath: testServerURL + "/public.zip?new-secret", + filePath: "index.html", + expectedRequests: 5, + expectedArchiveStatus: archiveOpened, + }, + { + name: "when opening non-cached file and content changes", + vfsPath: testServerURL + "/public.zip?changed-content=1", + filePath: "subdir/hello.html", + expectedRequests: 1, + // we receive an error on `read` as `open` offset is already cached + expectedOpenErr: httprange.ErrRangeRequestsNotSupported, + expectedArchiveStatus: archiveCorrupted, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + start := atomic.LoadInt64(&requests) + zip, err := fs.Root(context.Background(), test.vfsPath) + require.NoError(t, err) + + f, err := zip.Open(context.Background(), test.filePath) + if test.expectedOpenErr != nil { + require.Equal(t, test.expectedOpenErr, err) + status, _ := zip.(*zipArchive).openStatus() + require.Equal(t, test.expectedArchiveStatus, status) + return + } + + require.NoError(t, err) + defer f.Close() + + _, err = ioutil.ReadAll(f) + if test.expectedReadErr != nil { + require.Equal(t, test.expectedReadErr, err) + status, _ := zip.(*zipArchive).openStatus() + require.Equal(t, test.expectedArchiveStatus, status) + return + } + + require.NoError(t, err) + status, _ := zip.(*zipArchive).openStatus() + require.Equal(t, test.expectedArchiveStatus, status) + + end := atomic.LoadInt64(&requests) + require.Equal(t, test.expectedRequests, end-start) + }) + } +} + +func TestLstat(t *testing.T) { + zip, cleanup := openZipArchive(t, nil) + defer cleanup() + + tests := map[string]struct { + file string + isDir bool + isSymlink bool + expectedName string + expectedErr error + }{ + "file_exists": { + file: "index.html", + expectedName: "index.html", + }, + "file_exists_in_subdir": { + file: "subdir/hello.html", + expectedName: "hello.html", + }, + "file_exists_symlink": { + file: "symlink.html", + isSymlink: true, + expectedName: "symlink.html", + }, + "has_root": { + file: "", + isDir: true, + expectedName: "public", + }, + "has_root_dot": { + file: ".", + isDir: true, + expectedName: "public", + }, + "has_root_slash": { + file: "/", + isDir: true, + expectedName: "public", + }, + "is_dir": { + file: "subdir", + isDir: true, + expectedName: "subdir", + }, + "file_does_not_exist": { + file: "unknown.html", + expectedErr: os.ErrNotExist, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + fi, err := zip.Lstat(context.Background(), tt.file) + if tt.expectedErr != nil { + require.EqualError(t, err, tt.expectedErr.Error()) + return + } + + require.NoError(t, err) + require.Equal(t, tt.expectedName, fi.Name()) + require.Equal(t, tt.isDir, fi.IsDir()) + require.NotEmpty(t, fi.ModTime()) + + if tt.isDir { + require.Zero(t, fi.Size()) + require.True(t, fi.IsDir()) + return + } + + require.NotZero(t, fi.Size()) + + if tt.isSymlink { + require.NotZero(t, fi.Mode()&os.ModeSymlink) + } else { + require.True(t, fi.Mode().IsRegular()) + } + }) + } +} + +func TestReadLink(t *testing.T) { + zip, cleanup := openZipArchive(t, nil) + defer cleanup() + + tests := map[string]struct { + file string + expectedErr error + }{ + "symlink_success": { + file: "symlink.html", + }, + "file": { + file: "index.html", + expectedErr: errNotSymlink, + }, + "dir": { + file: "subdir", + expectedErr: errNotSymlink, + }, + "symlink_too_big": { + file: "bad_symlink.html", + expectedErr: errSymlinkSize, + }, + "file_does_not_exist": { + file: "unknown.html", + expectedErr: os.ErrNotExist, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + link, err := zip.Readlink(context.Background(), tt.file) + if tt.expectedErr != nil { + require.EqualError(t, err, tt.expectedErr.Error()) + return + } + + require.NoError(t, err) + require.NotEmpty(t, link) + }) + } +} + +func TestReadlinkCached(t *testing.T) { + var requests int64 + zip, cleanup := openZipArchive(t, &requests) + defer cleanup() + + t.Run("readlink first time", func(t *testing.T) { + requestsStart := atomic.LoadInt64(&requests) + _, err := zip.Readlink(context.Background(), "symlink.html") + require.NoError(t, err) + require.Equal(t, int64(2), atomic.LoadInt64(&requests)-requestsStart, "we expect two requests to read symlink: data offset and link") + }) + + t.Run("readlink second time", func(t *testing.T) { + requestsStart := atomic.LoadInt64(&requests) + _, err := zip.Readlink(context.Background(), "symlink.html") + require.NoError(t, err) + require.Equal(t, int64(0), atomic.LoadInt64(&requests)-requestsStart, "we expect no additional requests to read cached symlink") + }) +} + +func TestArchiveCanBeReadAfterOpenCtxCanceled(t *testing.T) { + testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil) + defer cleanup() + + fs := New(zipCfg).(*zipVFS) + zip := newArchive(fs, time.Second) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + err := zip.openArchive(ctx, testServerURL+"/public.zip") + require.EqualError(t, err, context.Canceled.Error()) + + <-zip.done + + file, err := zip.Open(context.Background(), "index.html") + require.NoError(t, err) + data, err := ioutil.ReadAll(file) + require.NoError(t, err) + + require.Equal(t, "zip.gitlab.io/project/index.html\n", string(data)) + require.NoError(t, file.Close()) +} + +func TestReadArchiveFails(t *testing.T) { + testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil) + defer cleanup() + + fs := New(zipCfg).(*zipVFS) + zip := newArchive(fs, time.Second) + + err := zip.openArchive(context.Background(), testServerURL+"/unkown.html") + require.Error(t, err) + require.Contains(t, err.Error(), httprange.ErrNotFound.Error()) + + _, err = zip.Open(context.Background(), "index.html") + require.EqualError(t, err, os.ErrNotExist.Error()) +} + +func openZipArchive(t *testing.T, requests *int64) (*zipArchive, func()) { + t.Helper() + + if requests == nil { + requests = new(int64) + } + + testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public-without-dirs.zip", requests) + + fs := New(zipCfg).(*zipVFS) + zip := newArchive(fs, time.Second) + + err := zip.openArchive(context.Background(), testServerURL+"/public.zip") + require.NoError(t, err) + + // public/ public/index.html public/404.html public/symlink.html + // public/subdir/ public/subdir/hello.html public/subdir/linked.html + // public/bad_symlink.html public/subdir/2bp3Qzs... + require.NotZero(t, zip.files) + require.Equal(t, int64(3), atomic.LoadInt64(requests), "we expect three requests to open ZIP archive: size and two to seek central directory") + + return zip, func() { + cleanup() + } +} + +func newZipFileServerURL(t *testing.T, zipFilePath string, requests *int64) (string, func()) { + t.Helper() + + chdir := testhelpers.ChdirInPath(t, "../../../shared/pages", &chdirSet) + + m := http.NewServeMux() + m.HandleFunc("/public.zip", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if requests != nil { + atomic.AddInt64(requests, 1) + } + + r.ParseForm() + + if changedContent := r.Form.Get("changed-content"); changedContent != "" { + w.WriteHeader(http.StatusRequestedRangeNotSatisfiable) + return + } + + http.ServeFile(w, r, zipFilePath) + })) + + testServer := httptest.NewServer(m) + + return testServer.URL, func() { + chdir() + testServer.Close() + } +} + +func benchmarkArchiveRead(b *testing.B, size int64) { + zbuf := new(bytes.Buffer) + + // create zip file of specified size + zw := zip.NewWriter(zbuf) + w, err := zw.Create("public/file.txt") + require.NoError(b, err) + _, err = io.CopyN(w, rand.Reader, size) + require.NoError(b, err) + require.NoError(b, zw.Close()) + + modtime := time.Now().Add(-time.Hour) + + m := http.NewServeMux() + m.HandleFunc("/public.zip", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.ServeContent(w, r, "public.zip", modtime, bytes.NewReader(zbuf.Bytes())) + })) + + ts := httptest.NewServer(m) + defer ts.Close() + + fs := New(zipCfg).(*zipVFS) + + b.ReportAllocs() + b.ResetTimer() + for n := 0; n < b.N; n++ { + z := newArchive(fs, time.Second) + err := z.openArchive(context.Background(), ts.URL+"/public.zip") + require.NoError(b, err) + + f, err := z.Open(context.Background(), "file.txt") + require.NoError(b, err) + + _, err = io.Copy(ioutil.Discard, f) + require.NoError(b, err) + + require.NoError(b, f.Close()) + } +} + +func BenchmarkArchiveRead(b *testing.B) { + for _, size := range []int{32 * 1024, 64 * 1024, 1024 * 1024} { + b.Run(strconv.Itoa(size), func(b *testing.B) { + benchmarkArchiveRead(b, int64(size)) + }) + } +} diff --git a/internal/vfs/zip/deflate_reader.go b/internal/vfs/zip/deflate_reader.go new file mode 100644 index 000000000..87a7da0c6 --- /dev/null +++ b/internal/vfs/zip/deflate_reader.go @@ -0,0 +1,66 @@ +package zip + +import ( + "bufio" + "compress/flate" + "errors" + "io" + "sync" +) + +var ErrClosedReader = errors.New("deflatereader: reader is closed") + +var deflateReaderPool sync.Pool + +// deflateReader wrapper to support reading compressed files. +// Implements the io.ReadCloser interface. +type deflateReader struct { + reader *bufio.Reader + closer io.Closer + flateReader io.ReadCloser +} + +// Read from flateReader +func (r *deflateReader) Read(p []byte) (n int, err error) { + if r.closer == nil { + return 0, ErrClosedReader + } + + return r.flateReader.Read(p) +} + +// Close all readers +func (r *deflateReader) Close() error { + if r.closer == nil { + return ErrClosedReader + } + + defer func() { + r.closer.Close() + r.closer = nil + deflateReaderPool.Put(r) + }() + + return r.flateReader.Close() +} + +func (r *deflateReader) reset(rc io.ReadCloser) { + r.reader.Reset(rc) + r.closer = rc + r.flateReader.(flate.Resetter).Reset(r.reader, nil) +} + +func newDeflateReader(r io.ReadCloser) *deflateReader { + if dr, ok := deflateReaderPool.Get().(*deflateReader); ok { + dr.reset(r) + return dr + } + + br := bufio.NewReader(r) + + return &deflateReader{ + reader: br, + closer: r, + flateReader: flate.NewReader(br), + } +} diff --git a/internal/vfs/zip/lru_cache.go b/internal/vfs/zip/lru_cache.go new file mode 100644 index 000000000..9810e2453 --- /dev/null +++ b/internal/vfs/zip/lru_cache.go @@ -0,0 +1,62 @@ +package zip + +import ( + "time" + + "github.com/karlseguin/ccache/v2" + + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// lruCacheGetPerPromote is a value that makes the item to be promoted +// it is taken arbitrally as a sane value indicating that the item +// was frequently picked +// promotion moves the item to the front of the LRU list +const lruCacheGetsPerPromote = 64 + +// lruCacheItemsToPruneDiv is a value that indicates how much items +// needs to be pruned on OOM, this prunes 1/16 of items +const lruCacheItemsToPruneDiv = 16 + +type lruCache struct { + op string + duration time.Duration + cache *ccache.Cache +} + +func newLruCache(op string, maxEntries int64, duration time.Duration) *lruCache { + configuration := ccache.Configure() + configuration.MaxSize(maxEntries) + configuration.ItemsToPrune(uint32(maxEntries) / lruCacheItemsToPruneDiv) + configuration.GetsPerPromote(lruCacheGetsPerPromote) // if item gets requested frequently promote it + configuration.OnDelete(func(*ccache.Item) { + metrics.ZipCachedEntries.WithLabelValues(op).Dec() + }) + + return &lruCache{ + op: op, + cache: ccache.New(configuration), + duration: duration, + } +} + +func (c *lruCache) findOrFetch(cacheNamespace, key string, fetchFn func() (interface{}, error)) (interface{}, error) { + item := c.cache.Get(cacheNamespace + key) + + if item != nil && !item.Expired() { + metrics.ZipCacheRequests.WithLabelValues(c.op, "hit").Inc() + return item.Value(), nil + } + + value, err := fetchFn() + if err != nil { + metrics.ZipCacheRequests.WithLabelValues(c.op, "error").Inc() + return nil, err + } + + metrics.ZipCacheRequests.WithLabelValues(c.op, "miss").Inc() + metrics.ZipCachedEntries.WithLabelValues(c.op).Inc() + + c.cache.Set(cacheNamespace+key, value, c.duration) + return value, nil +} diff --git a/internal/vfs/zip/vfs.go b/internal/vfs/zip/vfs.go new file mode 100644 index 000000000..b27424c60 --- /dev/null +++ b/internal/vfs/zip/vfs.go @@ -0,0 +1,214 @@ +package zip + +import ( + "context" + "errors" + "net/url" + "sync" + "time" + + "github.com/patrickmn/go-cache" + + "gitlab.com/gitlab-org/gitlab-pages/internal/config" + "gitlab.com/gitlab-org/gitlab-pages/internal/httprange" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +const ( + // we assume that each item costs around 100 bytes + // this gives around 5MB of raw memory needed without acceleration structures + defaultDataOffsetItems = 50000 + defaultDataOffsetExpirationInterval = time.Hour + + // we assume that each item costs around 200 bytes + // this gives around 2MB of raw memory needed without acceleration structures + defaultReadlinkItems = 10000 + defaultReadlinkExpirationInterval = time.Hour +) + +var ( + errAlreadyCached = errors.New("archive already cached") +) + +// zipVFS is a simple cached implementation of the vfs.VFS interface +type zipVFS struct { + cache *cache.Cache + cacheLock sync.Mutex + + openTimeout time.Duration + cacheExpirationInterval time.Duration + cacheRefreshInterval time.Duration + cacheCleanupInterval time.Duration + + dataOffsetCache *lruCache + readlinkCache *lruCache + + archiveCount int64 +} + +// New creates a zipVFS instance that can be used by a serving request +func New(cfg *config.ZipServing) vfs.VFS { + zipVFS := &zipVFS{ + cacheExpirationInterval: cfg.ExpirationInterval, + cacheRefreshInterval: cfg.RefreshInterval, + cacheCleanupInterval: cfg.CleanupInterval, + openTimeout: cfg.OpenTimeout, + } + + zipVFS.resetCache() + + // TODO: To be removed with https://gitlab.com/gitlab-org/gitlab-pages/-/issues/480 + zipVFS.dataOffsetCache = newLruCache("data-offset", defaultDataOffsetItems, defaultDataOffsetExpirationInterval) + zipVFS.readlinkCache = newLruCache("readlink", defaultReadlinkItems, defaultReadlinkExpirationInterval) + + return zipVFS +} + +// Reconfigure will update the zipVFS configuration values and will reset the +// cache +func (fs *zipVFS) Reconfigure(cfg *config.Config) error { + fs.cacheLock.Lock() + defer fs.cacheLock.Unlock() + + fs.openTimeout = cfg.Zip.OpenTimeout + fs.cacheExpirationInterval = cfg.Zip.ExpirationInterval + fs.cacheRefreshInterval = cfg.Zip.RefreshInterval + fs.cacheCleanupInterval = cfg.Zip.CleanupInterval + + fs.resetCache() + + return nil +} + +func (fs *zipVFS) resetCache() { + fs.cache = cache.New(fs.cacheExpirationInterval, fs.cacheCleanupInterval) + fs.cache.OnEvicted(func(s string, i interface{}) { + metrics.ZipCachedEntries.WithLabelValues("archive").Dec() + + i.(*zipArchive).onEvicted() + }) +} + +func (fs *zipVFS) keyFromPath(path string) (string, error) { + // We assume that our URL is https://.../artifacts.zip?content-sign=aaa + // our caching key is `https://.../artifacts.zip` + // TODO: replace caching key with file_sha256 + // https://gitlab.com/gitlab-org/gitlab-pages/-/issues/489 + key, err := url.Parse(path) + if err != nil { + return "", err + } + key.RawQuery = "" + key.Fragment = "" + return key.String(), nil +} + +// Root opens an archive given a URL path and returns an instance of zipArchive +// that implements the vfs.VFS interface. +// To avoid using locks, the findOrOpenArchive function runs inside of a for +// loop until an archive is either found or created and saved. +// If findOrOpenArchive returns errAlreadyCached, the for loop will continue +// to try and find the cached archive or return if there's an error, for example +// if the context is canceled. +func (fs *zipVFS) Root(ctx context.Context, path string) (vfs.Root, error) { + key, err := fs.keyFromPath(path) + if err != nil { + return nil, err + } + + // we do it in loop to not use any additional locks + for { + root, err := fs.findOrOpenArchive(ctx, key, path) + if err == errAlreadyCached { + continue + } + + // If archive is not found, return a known `vfs` error + if err == httprange.ErrNotFound { + err = &vfs.ErrNotExist{Inner: err} + } + + return root, err + } +} + +func (fs *zipVFS) Name() string { + return "zip" +} + +// findOrCreateArchive if found in fs.cache refresh if needed and return it. +// otherwise creates the archive entry in a cache and try to save it, +// if saving fails it's because the archive has already been cached +// (e.g. by another concurrent request) +func (fs *zipVFS) findOrCreateArchive(ctx context.Context, key string) (*zipArchive, error) { + // This needs to happen in lock to ensure that + // concurrent access will not remove it + // it is needed due to the bug https://github.com/patrickmn/go-cache/issues/48 + fs.cacheLock.Lock() + defer fs.cacheLock.Unlock() + + archive, expiry, found := fs.cache.GetWithExpiration(key) + if found { + status, _ := archive.(*zipArchive).openStatus() + switch status { + case archiveOpening: + metrics.ZipCacheRequests.WithLabelValues("archive", "hit-opening").Inc() + + case archiveOpenError: + // this means that archive is likely corrupted + // we keep it for duration of cache entry expiry (negative cache) + metrics.ZipCacheRequests.WithLabelValues("archive", "hit-open-error").Inc() + + case archiveOpened: + if time.Until(expiry) < fs.cacheRefreshInterval { + fs.cache.SetDefault(key, archive) + metrics.ZipCacheRequests.WithLabelValues("archive", "hit-refresh").Inc() + } else { + metrics.ZipCacheRequests.WithLabelValues("archive", "hit").Inc() + } + + case archiveCorrupted: + // this means that archive is likely changed + // we should invalidate it immediately + metrics.ZipCacheRequests.WithLabelValues("archive", "corrupted").Inc() + archive = nil + } + } + + if archive == nil { + archive = newArchive(fs, fs.openTimeout) + + // We call delete to ensure that expired item + // is properly evicted as there's a bug in a cache library: + // https://github.com/patrickmn/go-cache/issues/48 + fs.cache.Delete(key) + + // if adding the archive to the cache fails it means it's already been added before + // this is done to find concurrent additions. + if fs.cache.Add(key, archive, fs.cacheExpirationInterval) != nil { + metrics.ZipCacheRequests.WithLabelValues("archive", "already-cached").Inc() + return nil, errAlreadyCached + } + + metrics.ZipCacheRequests.WithLabelValues("archive", "miss").Inc() + metrics.ZipCachedEntries.WithLabelValues("archive").Inc() + } + + return archive.(*zipArchive), nil +} + +// findOrOpenArchive gets archive from cache and tries to open it +func (fs *zipVFS) findOrOpenArchive(ctx context.Context, key, path string) (*zipArchive, error) { + zipArchive, err := fs.findOrCreateArchive(ctx, key) + if err != nil { + return nil, err + } + + err = zipArchive.openArchive(ctx, path) + if err != nil { + return nil, err + } + + return zipArchive, nil +} diff --git a/internal/vfs/zip/vfs_test.go b/internal/vfs/zip/vfs_test.go new file mode 100644 index 000000000..ffda1fb6c --- /dev/null +++ b/internal/vfs/zip/vfs_test.go @@ -0,0 +1,226 @@ +package zip + +import ( + "context" + "io/ioutil" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/httprange" + "gitlab.com/gitlab-org/gitlab-pages/internal/vfs" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +func TestVFSRoot(t *testing.T) { + url, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil) + defer cleanup() + + tests := map[string]struct { + path string + expectedErrMsg string + }{ + "zip_file_exists": { + path: "/public.zip", + }, + "zip_file_does_not_exist": { + path: "/unknown", + expectedErrMsg: vfs.ErrNotExist{Inner: httprange.ErrNotFound}.Error(), + }, + "invalid_url": { + path: "/%", + expectedErrMsg: "invalid URL", + }, + } + + vfs := New(zipCfg) + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + root, err := vfs.Root(context.Background(), url+tt.path) + if tt.expectedErrMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedErrMsg) + return + } + + require.NoError(t, err) + require.IsType(t, &zipArchive{}, root) + + f, err := root.Open(context.Background(), "index.html") + require.NoError(t, err) + + content, err := ioutil.ReadAll(f) + require.NoError(t, err) + require.Equal(t, "zip.gitlab.io/project/index.html\n", string(content)) + + fi, err := root.Lstat(context.Background(), "index.html") + require.NoError(t, err) + require.Equal(t, "index.html", fi.Name()) + + link, err := root.Readlink(context.Background(), "symlink.html") + require.NoError(t, err) + require.Equal(t, "subdir/linked.html", link) + }) + } +} + +func TestVFSFindOrOpenArchiveConcurrentAccess(t *testing.T) { + testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil) + defer cleanup() + + path := testServerURL + "/public.zip" + + vfs := New(zipCfg).(*zipVFS) + root, err := vfs.Root(context.Background(), path) + require.NoError(t, err) + + done := make(chan struct{}) + defer close(done) + + // Try to hit a condition between the invocation + // of cache.GetWithExpiration and cache.Add + go func() { + for { + select { + case <-done: + return + + default: + vfs.cache.Flush() + vfs.cache.SetDefault(path, root) + } + } + }() + + require.Eventually(t, func() bool { + _, err := vfs.findOrOpenArchive(context.Background(), path, path) + return err == errAlreadyCached + }, 3*time.Second, time.Nanosecond) +} + +func TestVFSFindOrOpenArchiveRefresh(t *testing.T) { + testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil) + defer cleanup() + + // It should be large enough to not have flaky executions + const expiryInterval = 10 * time.Millisecond + + tests := map[string]struct { + path string + expirationInterval time.Duration + refreshInterval time.Duration + + expectNewArchive bool + expectOpenError bool + expectArchiveRefreshed bool + }{ + "after cache expiry of successful open a new archive is returned": { + path: "/public.zip", + expirationInterval: expiryInterval, + expectNewArchive: true, + expectOpenError: false, + }, + "after cache expiry of errored open a new archive is returned": { + path: "/unknown.zip", + expirationInterval: expiryInterval, + expectNewArchive: true, + expectOpenError: true, + }, + "subsequent open during refresh interval does refresh archive": { + path: "/public.zip", + expirationInterval: time.Second, + refreshInterval: time.Second, // refresh always + expectNewArchive: false, + expectOpenError: false, + expectArchiveRefreshed: true, + }, + "subsequent open before refresh interval does not refresh archive": { + path: "/public.zip", + expirationInterval: time.Second, + refreshInterval: time.Millisecond, // very short interval should not refresh + expectNewArchive: false, + expectOpenError: false, + expectArchiveRefreshed: false, + }, + "subsequent open of errored archive during refresh interval does not refresh": { + path: "/unknown.zip", + expirationInterval: time.Second, + refreshInterval: time.Second, // refresh always (if not error) + expectNewArchive: false, + expectOpenError: true, + expectArchiveRefreshed: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + withExpectedArchiveCount(t, 1, func(t *testing.T) { + cfg := *zipCfg + cfg.ExpirationInterval = test.expirationInterval + cfg.RefreshInterval = test.refreshInterval + + vfs := New(&cfg).(*zipVFS) + + path := testServerURL + test.path + + // create a new archive and increase counters + archive1, err1 := vfs.findOrOpenArchive(context.Background(), path, path) + if test.expectOpenError { + require.Error(t, err1) + require.Nil(t, archive1) + } else { + require.NoError(t, err1) + } + + item1, exp1, found := vfs.cache.GetWithExpiration(path) + require.True(t, found) + + // give some time to for timeouts to fire + time.Sleep(expiryInterval) + + if test.expectNewArchive { + // should return a new archive + archive2, err2 := vfs.findOrOpenArchive(context.Background(), path, path) + if test.expectOpenError { + require.Error(t, err2) + require.Nil(t, archive2) + } else { + require.NoError(t, err2) + require.NotEqual(t, archive1, archive2, "a new archive should be returned") + } + return + } + + // should return exactly the same archive + archive2, err2 := vfs.findOrOpenArchive(context.Background(), path, path) + require.Equal(t, archive1, archive2, "same archive is returned") + require.Equal(t, err1, err2, "same error for the same archive") + + item2, exp2, found := vfs.cache.GetWithExpiration(path) + require.True(t, found) + require.Equal(t, item1, item2, "same item is returned") + + if test.expectArchiveRefreshed { + require.Greater(t, exp2.UnixNano(), exp1.UnixNano(), "archive should be refreshed") + } else { + require.Equal(t, exp1.UnixNano(), exp2.UnixNano(), "archive has not been refreshed") + } + }) + }) + } +} + +func withExpectedArchiveCount(t *testing.T, archiveCount int, fn func(t *testing.T)) { + t.Helper() + + archivesMetric := metrics.ZipCachedEntries.WithLabelValues("archive") + archivesCount := testutil.ToFloat64(archivesMetric) + + fn(t) + + archivesCountEnd := testutil.ToFloat64(archivesMetric) + require.Equal(t, float64(archiveCount), archivesCountEnd-archivesCount, "exact number of archives is cached") +} diff --git a/main.go b/main.go new file mode 100644 index 000000000..ae6941ce1 --- /dev/null +++ b/main.go @@ -0,0 +1,451 @@ +package main + +import ( + "encoding/base64" + "fmt" + "io" + "math/rand" + "net/url" + "os" + "strings" + "time" + + "github.com/namsral/flag" + log "github.com/sirupsen/logrus" + + "gitlab.com/gitlab-org/labkit/errortracking" + + "gitlab.com/gitlab-org/gitlab-pages/internal/logging" + "gitlab.com/gitlab-org/gitlab-pages/internal/request" + "gitlab.com/gitlab-org/gitlab-pages/internal/tlsconfig" + "gitlab.com/gitlab-org/gitlab-pages/internal/validateargs" + "gitlab.com/gitlab-org/gitlab-pages/metrics" +) + +// VERSION stores the information about the semantic version of application +var VERSION = "dev" + +// REVISION stores the information about the git revision of application +var REVISION = "HEAD" + +func init() { + // TODO: move all flags to config pkg https://gitlab.com/gitlab-org/gitlab-pages/-/issues/507 + flag.Var(&listenHTTP, "listen-http", "The address(es) to listen on for HTTP requests") + flag.Var(&listenHTTPS, "listen-https", "The address(es) to listen on for HTTPS requests") + flag.Var(&listenProxy, "listen-proxy", "The address(es) to listen on for proxy requests") + flag.Var(&ListenHTTPSProxyv2, "listen-https-proxyv2", "The address(es) to listen on for HTTPS PROXYv2 requests (https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)") + flag.Var(&header, "header", "The additional http header(s) that should be send to the client") +} + +var ( + // TODO: move all flags to config pkg https://gitlab.com/gitlab-org/gitlab-pages/-/issues/507 + pagesRootCert = flag.String("root-cert", "", "The default path to file certificate to serve static pages") + pagesRootKey = flag.String("root-key", "", "The default path to file certificate to serve static pages") + redirectHTTP = flag.Bool("redirect-http", false, "Redirect pages from HTTP to HTTPS") + useHTTP2 = flag.Bool("use-http2", true, "Enable HTTP2 support") + pagesRoot = flag.String("pages-root", "shared/pages", "The directory where pages are stored") + pagesDomain = flag.String("pages-domain", "gitlab-example.com", "The domain to serve static pages") + artifactsServer = flag.String("artifacts-server", "", "API URL to proxy artifact requests to, e.g.: 'https://gitlab.com/api/v4'") + artifactsServerTimeout = flag.Int("artifacts-server-timeout", 10, "Timeout (in seconds) for a proxied request to the artifacts server") + pagesStatus = flag.String("pages-status", "", "The url path for a status page, e.g., /@status") + metricsAddress = flag.String("metrics-address", "", "The address to listen on for metrics requests") + sentryDSN = flag.String("sentry-dsn", "", "The address for sending sentry crash reporting to") + sentryEnvironment = flag.String("sentry-environment", "", "The environment for sentry crash reporting") + daemonUID = flag.Uint("daemon-uid", 0, "Drop privileges to this user") + daemonGID = flag.Uint("daemon-gid", 0, "Drop privileges to this group") + daemonInplaceChroot = flag.Bool("daemon-inplace-chroot", false, "Fall back to a non-bind-mount chroot of -pages-root when daemonizing") + logFormat = flag.String("log-format", "text", "The log output format: 'text' or 'json'") + logVerbose = flag.Bool("log-verbose", false, "Verbose logging") + _ = flag.String("admin-secret-path", "", "DEPRECATED") + _ = flag.String("admin-unix-listener", "", "DEPRECATED") + _ = flag.String("admin-https-listener", "", "DEPRECATED") + _ = flag.String("admin-https-cert", "", "DEPRECATED") + _ = flag.String("admin-https-key", "", "DEPRECATED") + secret = flag.String("auth-secret", "", "Cookie store hash key, should be at least 32 bytes long") + gitLabAuthServer = flag.String("auth-server", "", "DEPRECATED, use gitlab-server instead. GitLab server, for example https://www.gitlab.com") + gitLabServer = flag.String("gitlab-server", "", "GitLab server, for example https://www.gitlab.com") + internalGitLabServer = flag.String("internal-gitlab-server", "", "Internal GitLab server used for API requests, useful if you want to send that traffic over an internal load balancer, example value https://www.gitlab.com (defaults to value of gitlab-server)") + gitLabAPISecretKey = flag.String("api-secret-key", "", "File with secret key used to authenticate with the GitLab API") + gitlabClientHTTPTimeout = flag.Duration("gitlab-client-http-timeout", 10*time.Second, "GitLab API HTTP client connection timeout in seconds (default: 10s)") + gitlabClientJWTExpiry = flag.Duration("gitlab-client-jwt-expiry", 30*time.Second, "JWT Token expiry time in seconds (default: 30s)") + // TODO: implement functionality for disk, auto and gitlab https://gitlab.com/gitlab-org/gitlab/-/issues/217912 + domainConfigSource = flag.String("domain-config-source", "auto", "Domain configuration source 'disk', 'auto' or 'gitlab' (default: 'auto')") + clientID = flag.String("auth-client-id", "", "GitLab application Client ID") + clientSecret = flag.String("auth-client-secret", "", "GitLab application Client Secret") + redirectURI = flag.String("auth-redirect-uri", "", "GitLab application redirect URI") + maxConns = flag.Uint("max-conns", 5000, "Limit on the number of concurrent connections to the HTTP, HTTPS or proxy listeners") + insecureCiphers = flag.Bool("insecure-ciphers", false, "Use default list of cipher suites, may contain insecure ones like 3DES and RC4") + tlsMinVersion = flag.String("tls-min-version", "tls1.2", tlsconfig.FlagUsage("min")) + tlsMaxVersion = flag.String("tls-max-version", "", tlsconfig.FlagUsage("max")) + // TODO: move all flags to config pkg https://gitlab.com/gitlab-org/gitlab-pages/-/issues/507 + zipCacheExpiration = flag.Duration("zip-cache-expiration", 60*time.Second, "Zip serving archive cache expiration interval") + zipCacheCleanup = flag.Duration("zip-cache-cleanup", 30*time.Second, "Zip serving archive cache cleanup interval") + zipCacheRefresh = flag.Duration("zip-cache-refresh", 30*time.Second, "Zip serving archive cache refresh interval") + zipOpenTimeout = flag.Duration("zip-open-timeout", 30*time.Second, "Zip archive open timeout") + + disableCrossOriginRequests = flag.Bool("disable-cross-origin-requests", false, "Disable cross-origin requests") + + // See init() + listenHTTP MultiStringFlag + listenHTTPS MultiStringFlag + listenProxy MultiStringFlag + ListenHTTPSProxyv2 MultiStringFlag + + header MultiStringFlag +) + +func gitlabServerFromFlags() string { + if *gitLabServer != "" { + return *gitLabServer + } + + if *gitLabAuthServer != "" { + log.Warn("auth-server parameter is deprecated, use gitlab-server instead") + return *gitLabAuthServer + } + + u, err := url.Parse(*artifactsServer) + if err != nil { + return "" + } + + u.Path = "" + return u.String() +} + +func internalGitLabServerFromFlags() string { + if *internalGitLabServer != "" { + return *internalGitLabServer + } + + return gitlabServerFromFlags() +} + +func setArtifactsServer(artifactsServer string, artifactsServerTimeout int, config *appConfig) { + u, err := url.Parse(artifactsServer) + if err != nil { + log.Fatal(err) + } + // url.Parse ensures that the Scheme attribute is always lower case. + if u.Scheme != request.SchemeHTTP && u.Scheme != request.SchemeHTTPS { + errortracking.Capture(err) + log.Fatal("artifacts-server scheme must be either http:// or https://") + } + + if artifactsServerTimeout < 1 { + errortracking.Capture(err) + log.Fatal("artifacts-server-timeout must be greater than or equal to 1") + } + + config.ArtifactsServerTimeout = artifactsServerTimeout + config.ArtifactsServer = artifactsServer +} + +func setGitLabAPISecretKey(secretFile string, config *appConfig) { + encoded := readFile(secretFile) + + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(encoded))) + secretLength, err := base64.StdEncoding.Decode(decoded, encoded) + if err != nil { + log.WithError(err).Fatal("Failed to decode GitLab API secret") + } + + if secretLength != 32 { + log.WithError(fmt.Errorf("expected 32 bytes GitLab API secret but got %d bytes", secretLength)).Fatal("Failed to decode GitLab API secret") + } + + config.GitLabAPISecretKey = decoded +} + +func configFromFlags() appConfig { + var config appConfig + + config.Domain = strings.ToLower(*pagesDomain) + config.RedirectHTTP = *redirectHTTP + config.HTTP2 = *useHTTP2 + config.DisableCrossOriginRequests = *disableCrossOriginRequests + config.StatusPath = *pagesStatus + config.LogFormat = *logFormat + config.LogVerbose = *logVerbose + config.MaxConns = int(*maxConns) + config.InsecureCiphers = *insecureCiphers + // tlsMinVersion and tlsMaxVersion are validated in appMain + config.TLSMinVersion = tlsconfig.AllTLSVersions[*tlsMinVersion] + config.TLSMaxVersion = tlsconfig.AllTLSVersions[*tlsMaxVersion] + config.CustomHeaders = header + + for _, file := range []struct { + contents *[]byte + path string + }{ + {&config.RootCertificate, *pagesRootCert}, + {&config.RootKey, *pagesRootKey}, + } { + if file.path != "" { + *file.contents = readFile(file.path) + } + } + + if *gitLabAPISecretKey != "" { + setGitLabAPISecretKey(*gitLabAPISecretKey, &config) + } + + if *artifactsServer != "" { + setArtifactsServer(*artifactsServer, *artifactsServerTimeout, &config) + } + + config.GitLabServer = gitlabServerFromFlags() + config.InternalGitLabServer = internalGitLabServerFromFlags() + config.GitlabClientHTTPTimeout = *gitlabClientHTTPTimeout + config.GitlabJWTTokenExpiration = *gitlabClientJWTExpiry + config.DomainConfigurationSource = *domainConfigSource + config.StoreSecret = *secret + config.ClientID = *clientID + config.ClientSecret = *clientSecret + config.RedirectURI = *redirectURI + config.SentryDSN = *sentryDSN + config.SentryEnvironment = *sentryEnvironment + + config.ZipCacheExpiry = *zipCacheExpiration + config.ZipCacheCleanup = *zipCacheCleanup + config.ZipCacheRefresh = *zipCacheRefresh + config.ZipeOpenTimeout = *zipOpenTimeout + + checkAuthenticationConfig(config) + + return config +} + +func checkAuthenticationConfig(config appConfig) { + if config.StoreSecret == "" && config.ClientID == "" && + config.ClientSecret == "" && config.RedirectURI == "" { + return + } + assertAuthConfig(config) +} + +func assertAuthConfig(config appConfig) { + if config.StoreSecret == "" { + log.Fatal("auth-secret must be defined if authentication is supported") + } + if config.ClientID == "" { + log.Fatal("auth-client-id must be defined if authentication is supported") + } + if config.ClientSecret == "" { + log.Fatal("auth-client-secret must be defined if authentication is supported") + } + if config.GitLabServer == "" { + log.Fatal("gitlab-server must be defined if authentication is supported") + } + if config.RedirectURI == "" { + log.Fatal("auth-redirect-uri must be defined if authentication is supported") + } +} + +func initErrorReporting(sentryDSN, sentryEnvironment string) { + errortracking.Initialize( + errortracking.WithSentryDSN(sentryDSN), + errortracking.WithVersion(fmt.Sprintf("%s-%s", VERSION, REVISION)), + errortracking.WithLoggerName("gitlab-pages"), + errortracking.WithSentryEnvironment(sentryEnvironment)) +} + +func loadConfig() appConfig { + if err := validateargs.NotAllowed(os.Args[1:]); err != nil { + log.WithError(err).Fatal("Using invalid arguments, use -config=gitlab-pages-config file instead") + } + + if err := validateargs.Deprecated(os.Args[1:]); err != nil { + log.WithError(err).Warn("Using deprecated arguments") + } + + config := configFromFlags() + if config.SentryDSN != "" { + initErrorReporting(config.SentryDSN, config.SentryEnvironment) + } + + log.WithFields(log.Fields{ + "artifacts-server": *artifactsServer, + "artifacts-server-timeout": *artifactsServerTimeout, + "daemon-gid": *daemonGID, + "daemon-uid": *daemonUID, + "daemon-inplace-chroot": *daemonInplaceChroot, + "default-config-filename": flag.DefaultConfigFlagname, + "disable-cross-origin-requests": *disableCrossOriginRequests, + "domain": config.Domain, + "insecure-ciphers": config.InsecureCiphers, + "listen-http": strings.Join(listenHTTP, ","), + "listen-https": strings.Join(listenHTTPS, ","), + "listen-proxy": strings.Join(listenProxy, ","), + "listen-https-proxyv2": strings.Join(ListenHTTPSProxyv2, ","), + "log-format": *logFormat, + "metrics-address": *metricsAddress, + "pages-domain": *pagesDomain, + "pages-root": *pagesRoot, + "pages-status": *pagesStatus, + "redirect-http": config.RedirectHTTP, + "root-cert": *pagesRootKey, + "root-key": *pagesRootCert, + "status_path": config.StatusPath, + "tls-min-version": *tlsMinVersion, + "tls-max-version": *tlsMaxVersion, + "use-http-2": config.HTTP2, + "gitlab-server": config.GitLabServer, + "internal-gitlab-server": config.InternalGitLabServer, + "api-secret-key": *gitLabAPISecretKey, + "domain-config-source": config.DomainConfigurationSource, + "auth-redirect-uri": config.RedirectURI, + "zip-cache-expiration": config.ZipCacheExpiry, + "zip-cache-cleanup": config.ZipCacheCleanup, + "zip-cache-refresh": config.ZipCacheRefresh, + "zip-open-timeout": config.ZipeOpenTimeout, + }).Debug("Start daemon with configuration") + + return config +} + +func appMain() { + var showVersion = flag.Bool("version", false, "Show version") + + // read from -config=/path/to/gitlab-pages-config + flag.String(flag.DefaultConfigFlagname, "", "path to config file") + + flag.Parse() + + if err := tlsconfig.ValidateTLSVersions(*tlsMinVersion, *tlsMaxVersion); err != nil { + fatal(err, "invalid TLS version") + } + + printVersion(*showVersion, VERSION) + + err := logging.ConfigureLogging(*logFormat, *logVerbose) + if err != nil { + log.WithError(err).Fatal("Failed to initialize logging") + } + + log.WithFields(log.Fields{ + "version": VERSION, + "revision": REVISION, + }).Print("GitLab Pages Daemon") + log.Printf("URL: https://gitlab.com/gitlab-org/gitlab-pages") + + if err := os.Chdir(*pagesRoot); err != nil { + fatal(err, "could not change directory into pagesRoot") + } + + config := loadConfig() + + for _, cs := range [][]io.Closer{ + createAppListeners(&config), + createMetricsListener(&config), + } { + defer closeAll(cs) + } + + if *daemonUID != 0 || *daemonGID != 0 { + if err := daemonize(config, *daemonUID, *daemonGID, *daemonInplaceChroot); err != nil { + errortracking.Capture(err) + fatal(err, "could not create pages daemon") + } + + return + } + + runApp(config) +} + +func closeAll(cs []io.Closer) { + for _, c := range cs { + c.Close() + } +} + +// createAppListeners returns net.Listener and *os.File instances. The +// caller must ensure they don't get closed or garbage-collected (which +// implies closing) too soon. +func createAppListeners(config *appConfig) []io.Closer { + var closers []io.Closer + + for _, addr := range listenHTTP.Split() { + l, f := createSocket(addr) + closers = append(closers, l, f) + + log.WithFields(log.Fields{ + "listener": addr, + }).Debug("Set up HTTP listener") + + config.ListenHTTP = append(config.ListenHTTP, f.Fd()) + } + + for _, addr := range listenHTTPS.Split() { + l, f := createSocket(addr) + closers = append(closers, l, f) + + log.WithFields(log.Fields{ + "listener": addr, + }).Debug("Set up HTTPS listener") + + config.ListenHTTPS = append(config.ListenHTTPS, f.Fd()) + } + + for _, addr := range listenProxy.Split() { + l, f := createSocket(addr) + closers = append(closers, l, f) + + log.WithFields(log.Fields{ + "listener": addr, + }).Debug("Set up proxy listener") + + config.ListenProxy = append(config.ListenProxy, f.Fd()) + } + + for _, addr := range ListenHTTPSProxyv2.Split() { + l, f := createSocket(addr) + closers = append(closers, l, f) + + log.WithFields(log.Fields{ + "listener": addr, + }).Debug("Set up https proxyv2 listener") + + config.ListenHTTPSProxyv2 = append(config.ListenHTTPSProxyv2, f.Fd()) + } + + return closers +} + +// createMetricsListener returns net.Listener and *os.File instances. The +// caller must ensure they don't get closed or garbage-collected (which +// implies closing) too soon. +func createMetricsListener(config *appConfig) []io.Closer { + addr := *metricsAddress + if addr == "" { + return nil + } + + l, f := createSocket(addr) + config.ListenMetrics = f.Fd() + + log.WithFields(log.Fields{ + "listener": addr, + }).Debug("Set up metrics listener") + + return []io.Closer{l, f} +} + +func printVersion(showVersion bool, version string) { + if showVersion { + fmt.Fprintf(os.Stdout, "%s\n", version) + os.Exit(0) + } +} + +func main() { + log.SetOutput(os.Stderr) + + rand.Seed(time.Now().UnixNano()) + + metrics.MustRegister() + + daemonMain() + appMain() +} diff --git a/metrics/metrics.go b/metrics/metrics.go new file mode 100644 index 000000000..045ff26e0 --- /dev/null +++ b/metrics/metrics.go @@ -0,0 +1,240 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// TODO: remove disk source metrics https://gitlab.com/gitlab-org/gitlab-pages/-/issues/382 +var ( + // DomainsServed counts the total number of sites served + DomainsServed = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "gitlab_pages_served_domains", + Help: "The number of sites served by this Pages app", + }) + + // DomainFailedUpdates counts the number of failed site updates + DomainFailedUpdates = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "gitlab_pages_domains_failed_total", + Help: "The total number of site updates that have failed since daemon start", + }) + + // DomainUpdates counts the number of site updates successfully processed + DomainUpdates = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "gitlab_pages_domains_updated_total", + Help: "The total number of site updates successfully processed since daemon start", + }) + + // DomainLastUpdateTime is the UNIX timestamp of the last update + DomainLastUpdateTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "gitlab_pages_last_domain_update_seconds", + Help: "UNIX timestamp of the last update", + }) + + // DomainsConfigurationUpdateDuration is the time it takes to update domains configuration from disk + DomainsConfigurationUpdateDuration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "gitlab_pages_domains_configuration_update_duration", + Help: "The time (in seconds) it takes to update domains configuration from disk", + }) + + // DomainsSourceCacheHit is the number of GitLab API call cache hits + DomainsSourceCacheHit = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "gitlab_pages_domains_source_cache_hit", + Help: "The number of GitLab domains API cache hits", + }) + + // DomainsSourceCacheMiss is the number of GitLab API call cache misses + DomainsSourceCacheMiss = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "gitlab_pages_domains_source_cache_miss", + Help: "The number of GitLab domains API cache misses", + }) + + // DomainsSourceFailures is the number of GitLab API calls that failed + DomainsSourceFailures = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "gitlab_pages_domains_source_failures_total", + Help: "The number of GitLab API calls that failed", + }) + + // ServerlessRequests measures the amount of serverless invocations + ServerlessRequests = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "gitlab_pages_serverless_requests", + Help: "The number of total GitLab Serverless requests served", + }) + + // ServerlessLatency records serverless serving roundtrip duration + ServerlessLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "gitlab_pages_serverless_latency", + Help: "Serverless serving roundtrip duration", + }) + + // DomainsSourceAPIReqTotal is the number of calls made to the GitLab API that returned a 4XX error + DomainsSourceAPIReqTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "gitlab_pages_domains_source_api_requests_total", + Help: "The number of GitLab domains API calls with different status codes", + }, []string{"status_code"}) + + // DomainsSourceAPICallDuration is the time it takes to get a response from the GitLab API in seconds + DomainsSourceAPICallDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "gitlab_pages_domains_source_api_call_duration", + Help: "The time (in seconds) it takes to get a response from the GitLab domains API", + }, []string{"status_code"}) + + // DomainsSourceAPITraceDuration requests trace duration in seconds for + // different stages of an http request (see httptrace.ClientTrace) + DomainsSourceAPITraceDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "gitlab_pages_domains_source_api_trace_duration", + Help: "Domain source API request tracing duration in seconds for " + + "different connection stages (see Go's httptrace.ClientTrace)", + Buckets: []float64{0.001, 0.005, 0.01, 0.02, 0.05, 0.100, 0.250, + 0.500, 1, 2, 5, 10, 20, 50}, + }, + []string{"request_stage"}, + ) + + // DiskServingFileSize metric for file size serving. Includes a vfs_name (local or zip). + DiskServingFileSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "gitlab_pages_disk_serving_file_size_bytes", + Help: "The size in bytes for each file that has been served", + // From 1B to 100MB in *10 increments (1 10 100 1,000 10,000 100,000 1'000,000 10'000,000 100'000,000) + Buckets: prometheus.ExponentialBuckets(1.0, 10.0, 9), + }, []string{"vfs_name"}) + + // ServingTime metric for time taken to find a file serving it or not found. + ServingTime = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "gitlab_pages_serving_time_seconds", + Help: "The time (in seconds) taken to serve a file", + Buckets: []float64{0.1, 0.5, 1, 2.5, 5, 10, 60, 180}, + }) + + // VFSOperations metric for VFS operations (lstat, readlink, open) + VFSOperations = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "gitlab_pages_vfs_operations_total", + Help: "The number of VFS operations", + }, []string{"vfs_name", "operation", "success"}) + + // HTTPRangeRequestsTotal is the number of requests made to a + // httprange.Resource by opening and/or reading from it. Mostly used by the + // internal/vfs/zip package to load archives from Object Storage. + // Could be bigger than the number of pages served. + HTTPRangeRequestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "gitlab_pages_httprange_requests_total", + Help: "The number of requests made by the zip VFS to a Resource with " + + "different status codes." + + "Could be bigger than the number of requests served", + }, []string{"status_code"}) + + // HTTPRangeRequestDuration is the time it takes to get a response + // from an httprange.Resource hosted in object storage for a request made by + // the zip VFS + HTTPRangeRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "gitlab_pages_httprange_requests_duration", + Help: "The time (in seconds) it takes to get a response from " + + "a httprange.Resource hosted in object storage for a request " + + "made by the zip VFS", + }, + []string{"status_code"}, + ) + + // HTTPRangeTraceDuration httprange requests duration in seconds for + // different stages of an http request (see httptrace.ClientTrace) + HTTPRangeTraceDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "gitlab_pages_httprange_trace_duration", + Help: "httprange request tracing duration in seconds for " + + "different connection stages (see Go's httptrace.ClientTrace)", + Buckets: []float64{0.001, 0.005, 0.01, 0.02, 0.05, 0.100, 0.250, + 0.500, 1, 2, 5, 10, 20, 50}, + }, + []string{"request_stage"}, + ) + + // HTTPRangeOpenRequests is the number of open requests made by httprange.Reader + HTTPRangeOpenRequests = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "gitlab_pages_httprange_open_requests", + Help: "The number of open requests made by httprange.Reader", + }) + + // ZipOpened is the number of zip archives that have been opened + ZipOpened = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "gitlab_pages_zip_opened", + Help: "The total number of zip archives that have been opened", + }, + []string{"state"}, + ) + + // ZipCacheRequests is the number of cache hits/misses + ZipCacheRequests = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "gitlab_pages_zip_cache_requests", + Help: "The number of zip archives cache hits/misses", + }, + []string{"op", "cache"}, + ) + + // ZipCachedArchives is the number of entries in the cache + ZipCachedEntries = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "gitlab_pages_zip_cached_entries", + Help: "The number of entries in the cache", + }, + []string{"op"}, + ) + + // ZipArchiveEntriesCached is the number of files per zip archive currently + // in the cache + ZipArchiveEntriesCached = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "gitlab_pages_zip_archive_entries_cached", + Help: "The number of files per zip archive currently in the cache", + }, + ) + + // ZipOpenedEntriesCount is the number of files per archive total count + // over time + ZipOpenedEntriesCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "gitlab_pages_zip_opened_entries_count", + Help: "The number of files per zip archive total count over time", + }, + ) + + RejectedRequestsCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "gitlab_pages_unknown_method_rejected_requests", + Help: "The number of requests with unknown HTTP method which were rejected", + }, + ) +) + +// MustRegister collectors with the Prometheus client +func MustRegister() { + prometheus.MustRegister( + DomainsServed, + DomainFailedUpdates, + DomainUpdates, + DomainLastUpdateTime, + DomainsConfigurationUpdateDuration, + DomainsSourceCacheHit, + DomainsSourceCacheMiss, + DomainsSourceAPIReqTotal, + DomainsSourceAPICallDuration, + DomainsSourceAPITraceDuration, + DomainsSourceFailures, + ServerlessRequests, + ServerlessLatency, + DiskServingFileSize, + ServingTime, + VFSOperations, + HTTPRangeRequestsTotal, + HTTPRangeRequestDuration, + HTTPRangeTraceDuration, + HTTPRangeOpenRequests, + ZipOpened, + ZipOpenedEntriesCount, + ZipCacheRequests, + ZipArchiveEntriesCached, + ZipCachedEntries, + ) +} diff --git a/multi_string_flag.go b/multi_string_flag.go new file mode 100644 index 000000000..699529a0d --- /dev/null +++ b/multi_string_flag.go @@ -0,0 +1,37 @@ +package main + +import ( + "errors" + "strings" +) + +var errMultiStringSetEmptyValue = errors.New("value cannot be empty") + +// MultiStringFlag implements the flag.Value interface and allows a string flag +// to be specified multiple times on the command line. +// +// e.g.: -listen-http 127.0.0.1:80 -listen-http [::1]:80 +type MultiStringFlag []string + +// String returns the list of parameters joined with a commas (",") +func (s *MultiStringFlag) String() string { + return strings.Join(*s, ",") +} + +// Set appends the value to the list of parameters +func (s *MultiStringFlag) Set(value string) error { + if value == "" { + return errMultiStringSetEmptyValue + } + *s = append(*s, value) + return nil +} + +// Split each flag +func (s *MultiStringFlag) Split() (result []string) { + for _, str := range *s { + result = append(result, strings.Split(str, ",")...) + } + + return +} diff --git a/multi_string_flag_test.go b/multi_string_flag_test.go new file mode 100644 index 000000000..c09f7225c --- /dev/null +++ b/multi_string_flag_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMultiStringFlagAppendsOnSet(t *testing.T) { + var concrete MultiStringFlag + iface := &concrete + + require.NoError(t, iface.Set("foo")) + require.NoError(t, iface.Set("bar")) + + require.EqualError(t, iface.Set(""), "value cannot be empty") + + require.Equal(t, MultiStringFlag{"foo", "bar"}, concrete) +} + +func TestMultiStringFlag_Split(t *testing.T) { + tests := []struct { + name string + s *MultiStringFlag + wantResult []string + }{ + { + name: "empty_string", + s: &MultiStringFlag{}, // -flag "" + wantResult: []string{}, + }, + { + name: "one_value", + s: &MultiStringFlag{"value1"}, // -flag "value1" + wantResult: []string{"value1"}, + }, + { + name: "multiple_values", + s: &MultiStringFlag{"value1", "", "value3"}, // -flag "value1,,value3" + wantResult: []string{"value1", "", "value3"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotResult := tt.s.Split() + require.ElementsMatch(t, tt.wantResult, gotResult) + }) + } +} diff --git a/server.go b/server.go new file mode 100644 index 000000000..678367a3e --- /dev/null +++ b/server.go @@ -0,0 +1,76 @@ +package main + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "os" + "time" + + "github.com/gorilla/context" + proxyproto "github.com/pires/go-proxyproto" + "golang.org/x/net/http2" + + "gitlab.com/gitlab-org/gitlab-pages/internal/netutil" +) + +type keepAliveListener struct { + net.Listener +} + +type keepAliveSetter interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(time.Duration) error +} + +func (ln *keepAliveListener) Accept() (net.Conn, error) { + conn, err := ln.Listener.Accept() + if err != nil { + return nil, err + } + + kc := conn.(keepAliveSetter) + kc.SetKeepAlive(true) + kc.SetKeepAlivePeriod(3 * time.Minute) + + return conn, nil +} + +func listenAndServe(fd uintptr, handler http.Handler, useHTTP2 bool, tlsConfig *tls.Config, limiter *netutil.Limiter, proxyv2 bool) error { + // create server + server := &http.Server{Handler: context.ClearHandler(handler), TLSConfig: tlsConfig} + + if useHTTP2 { + err := http2.ConfigureServer(server, &http2.Server{}) + if err != nil { + return err + } + } + + l, err := net.FileListener(os.NewFile(fd, "[socket]")) + if err != nil { + return fmt.Errorf("failed to listen on FD %d: %v", fd, err) + } + + if limiter != nil { + l = netutil.SharedLimitListener(l, limiter) + } + + l = &keepAliveListener{l} + + if proxyv2 { + l = &proxyproto.Listener{ + Listener: l, + Policy: func(upstream net.Addr) (proxyproto.Policy, error) { + return proxyproto.REQUIRE, nil + }, + } + } + + if tlsConfig != nil { + l = tls.NewListener(l, server.TLSConfig) + } + + return server.Serve(l) +} diff --git a/shared/invalid-pages/.update/.gitkeep b/shared/invalid-pages/.update/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/lookups/new-source-test.gitlab.io.json b/shared/lookups/new-source-test.gitlab.io.json new file mode 100644 index 000000000..f84fde354 --- /dev/null +++ b/shared/lookups/new-source-test.gitlab.io.json @@ -0,0 +1,16 @@ +{ + "certificate": "", + "key": "", + "lookup_paths": [ + { + "access_control": false, + "https_only": false, + "prefix": "/my/pages/project/", + "project_id": 123, + "source": { + "path": "group/new-source-test.gitlab.io/public/", + "type": "file" + } + } + ] +} diff --git a/shared/lookups/zip-malformed.gitlab.io.json b/shared/lookups/zip-malformed.gitlab.io.json new file mode 100644 index 000000000..8c0185dac --- /dev/null +++ b/shared/lookups/zip-malformed.gitlab.io.json @@ -0,0 +1,16 @@ +{ + "certificate": "", + "key": "", + "lookup_paths": [ + { + "access_control": false, + "https_only": false, + "prefix": "/", + "project_id": 123, + "source": { + "path": "http://127.0.0.1:38001/malformed.zip", + "type": "zip" + } + } + ] +} diff --git a/shared/lookups/zip-not-found.gitlab.io.json b/shared/lookups/zip-not-found.gitlab.io.json new file mode 100644 index 000000000..514b8ff2b --- /dev/null +++ b/shared/lookups/zip-not-found.gitlab.io.json @@ -0,0 +1,16 @@ +{ + "certificate": "", + "key": "", + "lookup_paths": [ + { + "access_control": false, + "https_only": false, + "prefix": "/", + "project_id": 123, + "source": { + "path": "http://127.0.0.1:38001/not-found.zip", + "type": "zip" + } + } + ] +} diff --git a/shared/lookups/zip.gitlab.io.json b/shared/lookups/zip.gitlab.io.json new file mode 100644 index 000000000..0549adc82 --- /dev/null +++ b/shared/lookups/zip.gitlab.io.json @@ -0,0 +1,16 @@ +{ + "certificate": "", + "key": "", + "lookup_paths": [ + { + "access_control": false, + "https_only": false, + "prefix": "/", + "project_id": 123, + "source": { + "path": "http://127.0.0.1:38001/public.zip", + "type": "zip" + } + } + ] +} diff --git a/shared/pages/.hidden.group/project/.gitkeep b/shared/pages/.hidden.group/project/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/@hashed/hashed.gitlab.io/config.json b/shared/pages/@hashed/hashed.gitlab.io/config.json new file mode 100644 index 000000000..100963e42 --- /dev/null +++ b/shared/pages/@hashed/hashed.gitlab.io/config.json @@ -0,0 +1,7 @@ +{ + "Domains": [ + { + "Domain": "hashed.com" + } + ] +} diff --git a/shared/pages/@hashed/hashed.gitlab.io/public/index.html b/shared/pages/@hashed/hashed.gitlab.io/public/index.html new file mode 100644 index 000000000..175ec4695 --- /dev/null +++ b/shared/pages/@hashed/hashed.gitlab.io/public/index.html @@ -0,0 +1 @@ +@hashed/index.html diff --git a/shared/pages/CapitalGroup/CapitalProject/public/index.html b/shared/pages/CapitalGroup/CapitalProject/public/index.html new file mode 100644 index 000000000..e977a6bf9 --- /dev/null +++ b/shared/pages/CapitalGroup/CapitalProject/public/index.html @@ -0,0 +1 @@ +Capital Group & Project diff --git a/shared/pages/CapitalGroup/project/public/index.html b/shared/pages/CapitalGroup/project/public/index.html new file mode 100644 index 000000000..6f2b99674 --- /dev/null +++ b/shared/pages/CapitalGroup/project/public/index.html @@ -0,0 +1 @@ +Capital Group diff --git a/shared/pages/README.md b/shared/pages/README.md new file mode 100644 index 000000000..afd6db1cf --- /dev/null +++ b/shared/pages/README.md @@ -0,0 +1 @@ +We use that folder to store example files for various pages. diff --git a/shared/pages/group.404/domain.404/config.json b/shared/pages/group.404/domain.404/config.json new file mode 100644 index 000000000..c2adb9f66 --- /dev/null +++ b/shared/pages/group.404/domain.404/config.json @@ -0,0 +1,7 @@ +{ + "Domains": [ + { + "Domain": "domain.404.com" + } + ] +} diff --git a/shared/pages/group.404/domain.404/public/404.html b/shared/pages/group.404/domain.404/public/404.html new file mode 100644 index 000000000..ad0ed073b --- /dev/null +++ b/shared/pages/group.404/domain.404/public/404.html @@ -0,0 +1 @@ +Custom domain.404 page diff --git a/shared/pages/group.404/group.404.gitlab-example.com/public/404.html b/shared/pages/group.404/group.404.gitlab-example.com/public/404.html new file mode 100644 index 000000000..454a3d507 --- /dev/null +++ b/shared/pages/group.404/group.404.gitlab-example.com/public/404.html @@ -0,0 +1 @@ +Custom 404 group page diff --git a/shared/pages/group.404/group.404.test.io/public/404.html b/shared/pages/group.404/group.404.test.io/public/404.html new file mode 100644 index 000000000..454a3d507 --- /dev/null +++ b/shared/pages/group.404/group.404.test.io/public/404.html @@ -0,0 +1 @@ +Custom 404 group page diff --git a/shared/pages/group.404/private_project/config.json b/shared/pages/group.404/private_project/config.json new file mode 100644 index 000000000..5c0ebb508 --- /dev/null +++ b/shared/pages/group.404/private_project/config.json @@ -0,0 +1,5 @@ +{ "domains": [ + { + "Domain": "group.404.gitlab-example.com" + } +], "id": 1000, "access_control": true } diff --git a/shared/pages/group.404/private_project/public/404.html b/shared/pages/group.404/private_project/public/404.html new file mode 100644 index 000000000..6993ae1a7 --- /dev/null +++ b/shared/pages/group.404/private_project/public/404.html @@ -0,0 +1 @@ +Private custom 404 error page diff --git a/shared/pages/group.404/private_unauthorized/config.json b/shared/pages/group.404/private_unauthorized/config.json new file mode 100644 index 000000000..79349565e --- /dev/null +++ b/shared/pages/group.404/private_unauthorized/config.json @@ -0,0 +1,5 @@ +{ "domains": [ + { + "Domain": "group.404.gitlab-example.com" + } +], "id": 2000, "access_control": true } diff --git a/shared/pages/group.404/private_unauthorized/public/404.html b/shared/pages/group.404/private_unauthorized/public/404.html new file mode 100644 index 000000000..6993ae1a7 --- /dev/null +++ b/shared/pages/group.404/private_unauthorized/public/404.html @@ -0,0 +1 @@ +Private custom 404 error page diff --git a/shared/pages/group.404/project.404.symlink/public/404.html b/shared/pages/group.404/project.404.symlink/public/404.html new file mode 120000 index 000000000..798ae7a27 --- /dev/null +++ b/shared/pages/group.404/project.404.symlink/public/404.html @@ -0,0 +1 @@ +../../project.404/public/404.html \ No newline at end of file diff --git a/shared/pages/group.404/project.404/public/404.html b/shared/pages/group.404/project.404/public/404.html new file mode 100644 index 000000000..896a32d03 --- /dev/null +++ b/shared/pages/group.404/project.404/public/404.html @@ -0,0 +1 @@ +Custom 404 project page diff --git a/shared/pages/group.404/project.no.404/public/index.html b/shared/pages/group.404/project.no.404/public/index.html new file mode 100644 index 000000000..896c9cfcc --- /dev/null +++ b/shared/pages/group.404/project.no.404/public/index.html @@ -0,0 +1 @@ +Index page diff --git a/shared/pages/group.acme/with.acme.challenge/config.json b/shared/pages/group.acme/with.acme.challenge/config.json new file mode 100644 index 000000000..f50ba7fa6 --- /dev/null +++ b/shared/pages/group.acme/with.acme.challenge/config.json @@ -0,0 +1,6 @@ +{ "domains": [ + { + "domain": "withacmechallenge.domain.com" + } + ] +} diff --git a/shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/existingtoken b/shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/existingtoken new file mode 100644 index 000000000..84455e1d0 --- /dev/null +++ b/shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/existingtoken @@ -0,0 +1 @@ +this is token diff --git a/shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/foldertoken/index.html b/shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/foldertoken/index.html new file mode 100644 index 000000000..40047a2a2 --- /dev/null +++ b/shared/pages/group.acme/with.acme.challenge/public/.well-known/acme-challenge/foldertoken/index.html @@ -0,0 +1 @@ +foldertoken diff --git a/shared/pages/group.acme/with.acme.challenge/public/index.html b/shared/pages/group.acme/with.acme.challenge/public/index.html new file mode 100644 index 000000000..9015a7a32 --- /dev/null +++ b/shared/pages/group.acme/with.acme.challenge/public/index.html @@ -0,0 +1 @@ +index diff --git a/shared/pages/group.auth/group.auth.gitlab-example.com/config.json b/shared/pages/group.auth/group.auth.gitlab-example.com/config.json new file mode 100644 index 000000000..292ba6730 --- /dev/null +++ b/shared/pages/group.auth/group.auth.gitlab-example.com/config.json @@ -0,0 +1 @@ +{ "domains": [], "id": 1000, "access_control": true } diff --git a/shared/pages/group.auth/group.auth.gitlab-example.com/public/404.html b/shared/pages/group.auth/group.auth.gitlab-example.com/public/404.html new file mode 100644 index 000000000..f345e8bc7 --- /dev/null +++ b/shared/pages/group.auth/group.auth.gitlab-example.com/public/404.html @@ -0,0 +1 @@ +group.auth.gitlab-example.com namespace custom 404 diff --git a/shared/pages/group.auth/group.auth.gitlab-example.com/public/index.html b/shared/pages/group.auth/group.auth.gitlab-example.com/public/index.html new file mode 100644 index 000000000..d86bac9de --- /dev/null +++ b/shared/pages/group.auth/group.auth.gitlab-example.com/public/index.html @@ -0,0 +1 @@ +OK diff --git a/shared/pages/group.auth/group.auth.gitlab-example.com/public/private.project/index.html b/shared/pages/group.auth/group.auth.gitlab-example.com/public/private.project/index.html new file mode 100644 index 000000000..7c9933f70 --- /dev/null +++ b/shared/pages/group.auth/group.auth.gitlab-example.com/public/private.project/index.html @@ -0,0 +1 @@ +domain project subdirectory diff --git a/shared/pages/group.auth/private.project.1/config.json b/shared/pages/group.auth/private.project.1/config.json new file mode 100644 index 000000000..dbff776fe --- /dev/null +++ b/shared/pages/group.auth/private.project.1/config.json @@ -0,0 +1 @@ +{ "domains": [], "id": 2000, "access_control": true } diff --git a/shared/pages/group.auth/private.project.1/public/404.html b/shared/pages/group.auth/private.project.1/public/404.html new file mode 100644 index 000000000..3b751385c --- /dev/null +++ b/shared/pages/group.auth/private.project.1/public/404.html @@ -0,0 +1 @@ +group.auth.gitlab-example.com/private.project.1 custom 404 diff --git a/shared/pages/group.auth/private.project.1/public/index.html b/shared/pages/group.auth/private.project.1/public/index.html new file mode 100644 index 000000000..c8c6761a5 --- /dev/null +++ b/shared/pages/group.auth/private.project.1/public/index.html @@ -0,0 +1 @@ +private \ No newline at end of file diff --git a/shared/pages/group.auth/private.project.2/config.json b/shared/pages/group.auth/private.project.2/config.json new file mode 100644 index 000000000..6c5952195 --- /dev/null +++ b/shared/pages/group.auth/private.project.2/config.json @@ -0,0 +1 @@ +{ "domains": [], "id": 3000, "access_control": true } diff --git a/shared/pages/group.auth/private.project.2/public/index.html b/shared/pages/group.auth/private.project.2/public/index.html new file mode 100644 index 000000000..c8c6761a5 --- /dev/null +++ b/shared/pages/group.auth/private.project.2/public/index.html @@ -0,0 +1 @@ +private \ No newline at end of file diff --git a/shared/pages/group.auth/private.project/config.json b/shared/pages/group.auth/private.project/config.json new file mode 100644 index 000000000..e7d754a04 --- /dev/null +++ b/shared/pages/group.auth/private.project/config.json @@ -0,0 +1,10 @@ +{ "domains": [ + { + "domain": "private.domain.com", + "id": 1000, + "access_control": true + } + ], + "id": 1000, + "access_control": true +} diff --git a/shared/pages/group.auth/private.project/public/index.html b/shared/pages/group.auth/private.project/public/index.html new file mode 100644 index 000000000..c8c6761a5 --- /dev/null +++ b/shared/pages/group.auth/private.project/public/index.html @@ -0,0 +1 @@ +private \ No newline at end of file diff --git a/shared/pages/group.auth/subgroup/private.project.1/config.json b/shared/pages/group.auth/subgroup/private.project.1/config.json new file mode 100644 index 000000000..bee560034 --- /dev/null +++ b/shared/pages/group.auth/subgroup/private.project.1/config.json @@ -0,0 +1 @@ +{ "domains": [], "id": 2001, "access_control": true } diff --git a/shared/pages/group.auth/subgroup/private.project.1/public/index.html b/shared/pages/group.auth/subgroup/private.project.1/public/index.html new file mode 100644 index 000000000..c8c6761a5 --- /dev/null +++ b/shared/pages/group.auth/subgroup/private.project.1/public/index.html @@ -0,0 +1 @@ +private \ No newline at end of file diff --git a/shared/pages/group.auth/subgroup/private.project.2/config.json b/shared/pages/group.auth/subgroup/private.project.2/config.json new file mode 100644 index 000000000..7545aed13 --- /dev/null +++ b/shared/pages/group.auth/subgroup/private.project.2/config.json @@ -0,0 +1 @@ +{ "domains": [], "id": 3001, "access_control": true } diff --git a/shared/pages/group.auth/subgroup/private.project.2/public/index.html b/shared/pages/group.auth/subgroup/private.project.2/public/index.html new file mode 100644 index 000000000..c8c6761a5 --- /dev/null +++ b/shared/pages/group.auth/subgroup/private.project.2/public/index.html @@ -0,0 +1 @@ +private \ No newline at end of file diff --git a/shared/pages/group.auth/subgroup/private.project/config.json b/shared/pages/group.auth/subgroup/private.project/config.json new file mode 100644 index 000000000..a76960d77 --- /dev/null +++ b/shared/pages/group.auth/subgroup/private.project/config.json @@ -0,0 +1 @@ +{ "domains": [], "id": 1001, "access_control": true } diff --git a/shared/pages/group.auth/subgroup/private.project/public/index.html b/shared/pages/group.auth/subgroup/private.project/public/index.html new file mode 100644 index 000000000..c8c6761a5 --- /dev/null +++ b/shared/pages/group.auth/subgroup/private.project/public/index.html @@ -0,0 +1 @@ +private \ No newline at end of file diff --git a/shared/pages/group.deleted/is_file.txt b/shared/pages/group.deleted/is_file.txt new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.deleted/project.deleted/public/.gitkeep b/shared/pages/group.deleted/project.deleted/public/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.hidden/.hidden.project/public/.gitkeep b/shared/pages/group.hidden/.hidden.project/public/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.https-only/project1/config.json b/shared/pages/group.https-only/project1/config.json new file mode 100644 index 000000000..88f61b801 --- /dev/null +++ b/shared/pages/group.https-only/project1/config.json @@ -0,0 +1 @@ +{"https_only":true,"domains":[]} diff --git a/shared/pages/group.https-only/project1/public/index.html b/shared/pages/group.https-only/project1/public/index.html new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/shared/pages/group.https-only/project1/public/index.html @@ -0,0 +1 @@ + diff --git a/shared/pages/group.https-only/project2/config.json b/shared/pages/group.https-only/project2/config.json new file mode 100644 index 000000000..6a3d66c4c --- /dev/null +++ b/shared/pages/group.https-only/project2/config.json @@ -0,0 +1 @@ +{"https_only":false,"domains":[]} diff --git a/shared/pages/group.https-only/project2/public/index.html b/shared/pages/group.https-only/project2/public/index.html new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.https-only/project3/config.json b/shared/pages/group.https-only/project3/config.json new file mode 100644 index 000000000..0dacdbdbe --- /dev/null +++ b/shared/pages/group.https-only/project3/config.json @@ -0,0 +1,8 @@ +{ + "domains": [ + { + "domain": "test.my-domain.com", + "https_only": true + } + ] +} diff --git a/shared/pages/group.https-only/project3/public/index.html b/shared/pages/group.https-only/project3/public/index.html new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.https-only/project4/config.json b/shared/pages/group.https-only/project4/config.json new file mode 100644 index 000000000..5ef48cefb --- /dev/null +++ b/shared/pages/group.https-only/project4/config.json @@ -0,0 +1,8 @@ +{ + "domains": [ + { + "domain": "test2.my-domain.com", + "https_only": false + } + ] +} diff --git a/shared/pages/group.https-only/project4/public/index.html b/shared/pages/group.https-only/project4/public/index.html new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.https-only/project5/config.json b/shared/pages/group.https-only/project5/config.json new file mode 100644 index 000000000..5813bb855 --- /dev/null +++ b/shared/pages/group.https-only/project5/config.json @@ -0,0 +1,11 @@ +{ + "httpsonly": true, + "domains": [ + { + "domain": "no.cert.com", + "certificate": "test", + "key": "test", + "httpsonly": false + } + ] +} diff --git a/shared/pages/group.https-only/project5/public/index.html b/shared/pages/group.https-only/project5/public/index.html new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.internal/project.internal/public/.gitkeep b/shared/pages/group.internal/project.internal/public/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.no.projects/.gitkeep b/shared/pages/group.no.projects/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.no.public/project/.gitkeep b/shared/pages/group.no.public/project/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group.redirects/custom-domain/config.json b/shared/pages/group.redirects/custom-domain/config.json new file mode 100644 index 000000000..06026e537 --- /dev/null +++ b/shared/pages/group.redirects/custom-domain/config.json @@ -0,0 +1,7 @@ +{ + "domains": [ + { + "domain": "redirects.custom-domain.com" + } + ] +} diff --git a/shared/pages/group.redirects/custom-domain/public/_redirects b/shared/pages/group.redirects/custom-domain/public/_redirects new file mode 100644 index 000000000..42913b7ad --- /dev/null +++ b/shared/pages/group.redirects/custom-domain/public/_redirects @@ -0,0 +1,11 @@ +/redirect-portal.html /magic-land.html 302 +/cake-portal.html /still-alive.html 302 +/jobs/* /careers/:splat +/wardrobe.html /narnia.html 302 +/news/:year/:month/:date/:slug /blog/:year/:month/:date/:slug +/pit.html /spikes.html 302 +/goto-domain.html https://GitLab.com/pages.html 302 +/goto-bare-domain.html GitLab.com/pages.html 302 +/goto-schemaless.html //GitLab.com/pages.html 302 +/cake-portal/ /still-alive/ 302 +/file-override.html /should-not-be-here.html 302 diff --git a/shared/pages/group.redirects/group.redirects.gitlab-example.com/public/_redirects b/shared/pages/group.redirects/group.redirects.gitlab-example.com/public/_redirects new file mode 100644 index 000000000..42913b7ad --- /dev/null +++ b/shared/pages/group.redirects/group.redirects.gitlab-example.com/public/_redirects @@ -0,0 +1,11 @@ +/redirect-portal.html /magic-land.html 302 +/cake-portal.html /still-alive.html 302 +/jobs/* /careers/:splat +/wardrobe.html /narnia.html 302 +/news/:year/:month/:date/:slug /blog/:year/:month/:date/:slug +/pit.html /spikes.html 302 +/goto-domain.html https://GitLab.com/pages.html 302 +/goto-bare-domain.html GitLab.com/pages.html 302 +/goto-schemaless.html //GitLab.com/pages.html 302 +/cake-portal/ /still-alive/ 302 +/file-override.html /should-not-be-here.html 302 diff --git a/shared/pages/group.redirects/project-redirects/public/_redirects b/shared/pages/group.redirects/project-redirects/public/_redirects new file mode 100644 index 000000000..04c44ee47 --- /dev/null +++ b/shared/pages/group.redirects/project-redirects/public/_redirects @@ -0,0 +1,11 @@ +/project-redirects/redirect-portal.html /project-redirects/magic-land.html 302 +/project-redirects/cake-portal.html /project-redirects/still-alive.html 302 +/project-redirects/jobs/* /project-redirects/careers/:splat +/project-redirects/wardrobe.html /project-redirects/narnia.html 302 +/project-redirects/news/:year/:month/:date/:slug /project-redirects/blog/:year/:month/:date/:slug +/project-redirects/pit.html /project-redirects/spikes.html 302 +/project-redirects/goto-domain.html https://GitLab.com/pages.html 302 +/project-redirects/goto-bare-domain.html GitLab.com/pages.html 302 +/project-redirects/goto-schemaless.html //GitLab.com/pages.html 302 +/project-redirects/cake-portal/ /project-redirects/still-alive/ 302 +/project-redirects/file-override.html /project-redirects/should-not-be-here.html 302 diff --git a/shared/pages/group.redirects/project-redirects/public/file-override.html b/shared/pages/group.redirects/project-redirects/public/file-override.html new file mode 100644 index 000000000..63f14c5ef --- /dev/null +++ b/shared/pages/group.redirects/project-redirects/public/file-override.html @@ -0,0 +1 @@ +the file was served! diff --git a/shared/pages/group.redirects/project-redirects/public/index.html b/shared/pages/group.redirects/project-redirects/public/index.html new file mode 100644 index 000000000..985b27867 --- /dev/null +++ b/shared/pages/group.redirects/project-redirects/public/index.html @@ -0,0 +1 @@ +Visit http://group.redirects.pages.gdk.test:8090/project-redirects/redirect-portal.html and get redirected to http://group.redirects.pages.gdk.test:8090/project-redirects/magic-land.html diff --git a/shared/pages/group.redirects/project-redirects/public/magic-land.html b/shared/pages/group.redirects/project-redirects/public/magic-land.html new file mode 100644 index 000000000..f594fab75 --- /dev/null +++ b/shared/pages/group.redirects/project-redirects/public/magic-land.html @@ -0,0 +1 @@ +Magic land! diff --git a/shared/pages/group/CapitalProject/public/index.html b/shared/pages/group/CapitalProject/public/index.html new file mode 100644 index 000000000..e1b52676f --- /dev/null +++ b/shared/pages/group/CapitalProject/public/index.html @@ -0,0 +1 @@ +Capital Project diff --git a/shared/pages/group/group.gitlab-example.com/public/index.html b/shared/pages/group/group.gitlab-example.com/public/index.html new file mode 100644 index 000000000..d86bac9de --- /dev/null +++ b/shared/pages/group/group.gitlab-example.com/public/index.html @@ -0,0 +1 @@ +OK diff --git a/shared/pages/group/group.gitlab-example.com/public/index.html.br b/shared/pages/group/group.gitlab-example.com/public/index.html.br new file mode 100644 index 0000000000000000000000000000000000000000..cf5a9650dd361dec3c5a97f479617ce874b36366 GIT binary patch literal 8 PcmY%7U|{k0=3)i_1CRjK literal 0 HcmV?d00001 diff --git a/shared/pages/group/group.gitlab-example.com/public/index.html.gz b/shared/pages/group/group.gitlab-example.com/public/index.html.gz new file mode 100644 index 0000000000000000000000000000000000000000..fabfb92af0adc7bc2e25aa7ec76bb132b6332cf0 GIT binary patch literal 34 pcmb2|=HS?WtstI(IWsRMwL&kWBsYiQ^Y>>=4E8zq%b6J%7y#Ku3-bT~ literal 0 HcmV?d00001 diff --git a/shared/pages/group/group.gitlab-example.com/public/project/index.html b/shared/pages/group/group.gitlab-example.com/public/project/index.html new file mode 100644 index 000000000..7c9933f70 --- /dev/null +++ b/shared/pages/group/group.gitlab-example.com/public/project/index.html @@ -0,0 +1 @@ +domain project subdirectory diff --git a/shared/pages/group/group.test.io/.gitkeep b/shared/pages/group/group.test.io/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group/group.test.io/config.json b/shared/pages/group/group.test.io/config.json new file mode 100644 index 000000000..5b9be1fdd --- /dev/null +++ b/shared/pages/group/group.test.io/config.json @@ -0,0 +1,15 @@ +{ + "Domains": [ + { + "Domain": "test.domain.com" + }, + { + "Domain": "my.test.io" + }, + { + "Domain": "other.domain.com", + "Certificate": "test", + "Key": "key" + } + ] +} diff --git a/shared/pages/group/group.test.io/public/.gitkeep b/shared/pages/group/group.test.io/public/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group/group.test.io/public/gz-symlink b/shared/pages/group/group.test.io/public/gz-symlink new file mode 100644 index 000000000..6320cd248 --- /dev/null +++ b/shared/pages/group/group.test.io/public/gz-symlink @@ -0,0 +1 @@ +data \ No newline at end of file diff --git a/shared/pages/group/group.test.io/public/gz-symlink.br b/shared/pages/group/group.test.io/public/gz-symlink.br new file mode 120000 index 000000000..28e148537 --- /dev/null +++ b/shared/pages/group/group.test.io/public/gz-symlink.br @@ -0,0 +1 @@ +../config.json \ No newline at end of file diff --git a/shared/pages/group/group.test.io/public/gz-symlink.gz b/shared/pages/group/group.test.io/public/gz-symlink.gz new file mode 120000 index 000000000..28e148537 --- /dev/null +++ b/shared/pages/group/group.test.io/public/gz-symlink.gz @@ -0,0 +1 @@ +../config.json \ No newline at end of file diff --git a/shared/pages/group/group.test.io/public/image-nogzip.unknown b/shared/pages/group/group.test.io/public/image-nogzip.unknown new file mode 100644 index 0000000000000000000000000000000000000000..edaf2b97ab8256e904871ce15643d3275dd771e6 GIT binary patch literal 14 VcmZ?wbhEHbWMp7uU|?Xd1^^R50pbx1^^eZ17rXI literal 0 HcmV?d00001 diff --git a/shared/pages/group/group.test.io/public/index.html.gz b/shared/pages/group/group.test.io/public/index.html.gz new file mode 100644 index 0000000000000000000000000000000000000000..73fadf1decd778fce7de051ebda0ae8cf84a9c9d GIT binary patch literal 40 wcmb2|=HPJe`4PdyoSB!BTA`OwlAFVD+V{+v%f2Ud9x*XE*i`#)GB7Xz04bLZ(*OVf literal 0 HcmV?d00001 diff --git a/shared/pages/group/group.test.io/public/index2.html b/shared/pages/group/group.test.io/public/index2.html new file mode 100644 index 000000000..8da4d196c --- /dev/null +++ b/shared/pages/group/group.test.io/public/index2.html @@ -0,0 +1 @@ +main-dir diff --git a/shared/pages/group/group.test.io/public/index2.html.gz b/shared/pages/group/group.test.io/public/index2.html.gz new file mode 100644 index 0000000000000000000000000000000000000000..3975516e7cb373d4ce7b627564674cbcdfffb66f GIT binary patch literal 41 xcmb2|=HRgXQX0>|oSB!BT4AJ@QIea(aN76GnajQ>bsjM>IM`JCa56A3001wS4dnm; literal 0 HcmV?d00001 diff --git a/shared/pages/group/group.test.io/public/project2/index.html b/shared/pages/group/group.test.io/public/project2/index.html new file mode 100644 index 000000000..a47d57687 --- /dev/null +++ b/shared/pages/group/group.test.io/public/project2/index.html @@ -0,0 +1 @@ +project2-main diff --git a/shared/pages/group/group.test.io/public/text-nogzip.unknown b/shared/pages/group/group.test.io/public/text-nogzip.unknown new file mode 100644 index 000000000..b6fc4c620 --- /dev/null +++ b/shared/pages/group/group.test.io/public/text-nogzip.unknown @@ -0,0 +1 @@ +hello \ No newline at end of file diff --git a/shared/pages/group/group.test.io/public/text.unknown b/shared/pages/group/group.test.io/public/text.unknown new file mode 100644 index 000000000..b6fc4c620 --- /dev/null +++ b/shared/pages/group/group.test.io/public/text.unknown @@ -0,0 +1 @@ +hello \ No newline at end of file diff --git a/shared/pages/group/group.test.io/public/text.unknown.br b/shared/pages/group/group.test.io/public/text.unknown.br new file mode 100644 index 0000000000000000000000000000000000000000..99eaf2eb6bbf40f6d971bd43f65be759335f8e69 GIT binary patch literal 10 RcmY!sU|`8e&B@7U1^@~^0!;t_ literal 0 HcmV?d00001 diff --git a/shared/pages/group/group.test.io/public/text.unknown.gz b/shared/pages/group/group.test.io/public/text.unknown.gz new file mode 100644 index 0000000000000000000000000000000000000000..484e01a110e6181861b68c61997b5dcd7a2d1559 GIT binary patch literal 37 scmb2|=HLjH{2tA~T#{N`qL-ADn3R*qaN6VS$&>61ZOa7ASQ!`?0OrFBxBvhE literal 0 HcmV?d00001 diff --git a/shared/pages/group/new-source-test.gitlab.io/public/index.html b/shared/pages/group/new-source-test.gitlab.io/public/index.html new file mode 100644 index 000000000..00e11d669 --- /dev/null +++ b/shared/pages/group/new-source-test.gitlab.io/public/index.html @@ -0,0 +1 @@ +New Pages GitLab Source TEST OK diff --git a/shared/pages/group/project/public/file.webmanifest b/shared/pages/group/project/public/file.webmanifest new file mode 100644 index 000000000..e69de29bb diff --git a/shared/pages/group/project/public/index.html b/shared/pages/group/project/public/index.html new file mode 100644 index 000000000..4ea0a6ae9 --- /dev/null +++ b/shared/pages/group/project/public/index.html @@ -0,0 +1 @@ +project-subdir diff --git a/shared/pages/group/project/public/subdir/index.html b/shared/pages/group/project/public/subdir/index.html new file mode 100644 index 000000000..c4815ba3a --- /dev/null +++ b/shared/pages/group/project/public/subdir/index.html @@ -0,0 +1 @@ +project-subsubdir diff --git a/shared/pages/group/project2/public/index.html b/shared/pages/group/project2/public/index.html new file mode 100644 index 000000000..a47d57687 --- /dev/null +++ b/shared/pages/group/project2/public/index.html @@ -0,0 +1 @@ +project2-main diff --git a/shared/pages/group/project2/public/subdir/index.html b/shared/pages/group/project2/public/subdir/index.html new file mode 100644 index 000000000..ffcfa0b4a --- /dev/null +++ b/shared/pages/group/project2/public/subdir/index.html @@ -0,0 +1 @@ +project2-subdir diff --git a/shared/pages/group/serving/public/index.html b/shared/pages/group/serving/public/index.html new file mode 100644 index 000000000..b3a2c1c3b --- /dev/null +++ b/shared/pages/group/serving/public/index.html @@ -0,0 +1,6 @@ + + + +

HTML Document

+ + diff --git a/shared/pages/group/subgroup/project/public/index.html b/shared/pages/group/subgroup/project/public/index.html new file mode 100644 index 000000000..91ec223dc --- /dev/null +++ b/shared/pages/group/subgroup/project/public/index.html @@ -0,0 +1 @@ +A subgroup project diff --git a/shared/pages/group/subgroup/project/public/subdir/index.html b/shared/pages/group/subgroup/project/public/subdir/index.html new file mode 100644 index 000000000..59bf0492f --- /dev/null +++ b/shared/pages/group/subgroup/project/public/subdir/index.html @@ -0,0 +1 @@ +A subgroup project-subsubdir diff --git a/shared/pages/group/zip.gitlab.io/public-without-dirs.zip b/shared/pages/group/zip.gitlab.io/public-without-dirs.zip new file mode 100644 index 0000000000000000000000000000000000000000..a6cfdfcfc595b3fafeb77abe6c8d1c28a88bfa04 GIT binary patch literal 2117 zcmWIWW@h1H0D*U+`hnj1q8%Cx3=AMF!63s>P@0sJnXF%2nv{}Rq@R(RlasHPQIeYz z8p6rIEYF`1AKua$Us}P-*(j;NIIybN(%CuOJlQ?3A|pK`C9y2ZD9Otuq^hjcD7Prf z%-}J*T+L-_SiL$+^=I4&4UFX6 z&CA5dzyQMH2)mLJQ{sy&b3tKDCnWdH^7^bNsbwjK2VD=2~f$#z{tSBu%r>h#9g4VLJBmr!VKAT%wh`J^eP4h zwBm9E$9ZT7D+4pA(83y=xC$xc;5^NU9-K&V4mKWC{a`g7krq(PBxK{Qnc&95${At} zMJ<4k4PA^d6j7uQV=O3Rpq4cdV?inQ|3ZFdxLYwx9##fsP|1TQAV7wr<~n4wNQ5<0umGYdxaOu= zx`cZKL}WT9C1o0vS!Cu1XXWLG6sA;Wx%z~8mu3{Z2f0_IhFJy`l=%2oy5}VocvTpd z8T|X1GTLm}Ywy6jgatlqQxHd;6w@ zm_!5x7^Ik6mSm=ylw=i_qy-fRm{gh>WaRn<8G&!49o-)8L0JxS`0#h6B19~nc?w-St#Pl!K`c`&72IJ4E!t% J3|_1t9stdUa8UpN literal 0 HcmV?d00001 diff --git a/shared/pages/is_file b/shared/pages/is_file new file mode 100644 index 000000000..e69de29bb diff --git a/test/acceptance/acceptance_test.go b/test/acceptance/acceptance_test.go new file mode 100644 index 000000000..ba6528c10 --- /dev/null +++ b/test/acceptance/acceptance_test.go @@ -0,0 +1,81 @@ +package acceptance_test + +import ( + "flag" + "fmt" + "log" + "os" + "testing" + + "gitlab.com/gitlab-org/gitlab-pages/internal/fixture" +) + +const ( + objectStorageMockServer = "127.0.0.1:38001" +) + +var ( + pagesBinary = flag.String("gitlab-pages-binary", "../../gitlab-pages", "Path to the gitlab-pages binary") + + httpPort = "36000" + httpsPort = "37000" + httpProxyPort = "38000" + httpProxyV2Port = "39000" + + // TODO: Use TCP port 0 everywhere to avoid conflicts. The binary could output + // the actual port (and type of listener) for us to read in place of the + // hardcoded values below. + listeners = []ListenSpec{ + {"http", "127.0.0.1", httpPort}, + {"https", "127.0.0.1", httpsPort}, + {"proxy", "127.0.0.1", httpProxyPort}, + {"https-proxyv2", "127.0.0.1", httpProxyV2Port}, + // TODO: re-enable IPv6 listeners once https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/12258 is resolved + // https://gitlab.com/gitlab-org/gitlab-pages/-/issues/528 + // {"http", "::1", httpPort}, + // {"https", "::1", httpsPort}, + // {"proxy", "::1", httpProxyPort}, + // {"https-proxyv2", "::1", httpProxyV2Port}, + } + + httpListener = listeners[0] + httpsListener = listeners[1] + proxyListener = listeners[2] + httpsProxyv2Listener = listeners[3] +) + +func TestMain(m *testing.M) { + flag.Parse() + + if testing.Short() { + log.Println("Acceptance tests disabled") + os.Exit(0) + } + + if _, err := os.Stat(*pagesBinary); os.IsNotExist(err) { + log.Fatalf("Couldn't find gitlab-pages binary at %s\n", *pagesBinary) + } + + if ok := TestCertPool.AppendCertsFromPEM([]byte(fixture.Certificate)); !ok { + fmt.Println("Failed to load cert!") + } + + os.Exit(m.Run()) +} + +func skipUnlessEnabled(t *testing.T, conditions ...string) { + t.Helper() + + for _, condition := range conditions { + switch condition { + case "not-inplace-chroot": + if os.Getenv("TEST_DAEMONIZE") == "inplace" { + t.Log("Not supported with -daemon-inplace-chroot") + t.SkipNow() + } + default: + t.Error("Unknown condition:", condition) + t.FailNow() + } + } +} diff --git a/test/acceptance/acme_test.go b/test/acceptance/acme_test.go new file mode 100644 index 000000000..a0425b7d7 --- /dev/null +++ b/test/acceptance/acme_test.go @@ -0,0 +1,73 @@ +package acceptance_test + +import ( + "io/ioutil" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAcmeChallengesWhenItIsNotConfigured(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "") + defer teardown() + + t.Run("When domain folder contains requested acme challenge it responds with it", func(t *testing.T) { + rsp, err := GetRedirectPage(t, httpListener, "withacmechallenge.domain.com", + existingAcmeTokenPath) + + defer rsp.Body.Close() + require.NoError(t, err) + require.Equal(t, http.StatusOK, rsp.StatusCode) + body, _ := ioutil.ReadAll(rsp.Body) + require.Equal(t, "this is token\n", string(body)) + }) + + t.Run("When domain folder doesn't contains requested acme challenge it returns 404", + func(t *testing.T) { + rsp, err := GetRedirectPage(t, httpListener, "withacmechallenge.domain.com", + notExistingAcmeTokenPath) + + defer rsp.Body.Close() + require.NoError(t, err) + require.Equal(t, http.StatusNotFound, rsp.StatusCode) + }, + ) +} + +func TestAcmeChallengesWhenItIsConfigured(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "-gitlab-server=https://gitlab-acme.com") + defer teardown() + + t.Run("When domain folder contains requested acme challenge it responds with it", func(t *testing.T) { + rsp, err := GetRedirectPage(t, httpListener, "withacmechallenge.domain.com", + existingAcmeTokenPath) + + defer rsp.Body.Close() + require.NoError(t, err) + require.Equal(t, http.StatusOK, rsp.StatusCode) + body, _ := ioutil.ReadAll(rsp.Body) + require.Equal(t, "this is token\n", string(body)) + }) + + t.Run("When domain folder doesn't contains requested acme challenge it redirects to GitLab", + func(t *testing.T) { + rsp, err := GetRedirectPage(t, httpListener, "withacmechallenge.domain.com", + notExistingAcmeTokenPath) + + defer rsp.Body.Close() + require.NoError(t, err) + require.Equal(t, http.StatusTemporaryRedirect, rsp.StatusCode) + + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + require.Equal(t, url.String(), "https://gitlab-acme.com/-/acme-challenge?domain=withacmechallenge.domain.com&token=notexistingtoken") + }, + ) +} diff --git a/test/acceptance/artifacts_test.go b/test/acceptance/artifacts_test.go new file mode 100644 index 000000000..57c7a02a9 --- /dev/null +++ b/test/acceptance/artifacts_test.go @@ -0,0 +1,299 @@ +package acceptance_test + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestArtifactProxyRequest(t *testing.T) { + skipUnlessEnabled(t, "not-inplace-chroot") + + transport := (TestHTTPSClient.Transport).(*http.Transport) + defer func(t time.Duration) { + transport.ResponseHeaderTimeout = t + }(transport.ResponseHeaderTimeout) + transport.ResponseHeaderTimeout = 5 * time.Second + + content := "Title of the document" + contentLength := int64(len(content)) + testServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RawPath { + case "/api/v4/projects/group%2Fproject/jobs/1/artifacts/delayed_200.html": + time.Sleep(2 * time.Second) + fallthrough + case "/api/v4/projects/group%2Fproject/jobs/1/artifacts/200.html", + "/api/v4/projects/group%2Fsubgroup%2Fproject/jobs/1/artifacts/200.html": + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprint(w, content) + case "/api/v4/projects/group%2Fproject/jobs/1/artifacts/500.html": + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, content) + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.RawPath) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + fmt.Fprint(w, content) + } + })) + + keyFile, certFile := CreateHTTPSFixtureFiles(t) + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + require.NoError(t, err) + defer os.Remove(keyFile) + defer os.Remove(certFile) + + testServer.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + testServer.StartTLS() + defer testServer.Close() + + tests := []struct { + name string + host string + path string + status int + binaryOption string + content string + length int64 + cacheControl string + contentType string + }{ + { + name: "basic proxied request", + host: "group.gitlab-example.com", + path: "/-/project/-/jobs/1/artifacts/200.html", + status: http.StatusOK, + binaryOption: "", + content: content, + length: contentLength, + cacheControl: "max-age=3600", + contentType: "text/html; charset=utf-8", + }, + { + name: "basic proxied request for subgroup", + host: "group.gitlab-example.com", + path: "/-/subgroup/project/-/jobs/1/artifacts/200.html", + status: http.StatusOK, + binaryOption: "", + content: content, + length: contentLength, + cacheControl: "max-age=3600", + contentType: "text/html; charset=utf-8", + }, + { + name: "502 error while attempting to proxy", + host: "group.gitlab-example.com", + path: "/-/project/-/jobs/1/artifacts/delayed_200.html", + status: http.StatusBadGateway, + binaryOption: "-artifacts-server-timeout=1", + content: "", + length: 0, + cacheControl: "", + contentType: "text/html; charset=utf-8", + }, + { + name: "Proxying 404 from server", + host: "group.gitlab-example.com", + path: "/-/project/-/jobs/1/artifacts/404.html", + status: http.StatusNotFound, + binaryOption: "", + content: "", + length: 0, + cacheControl: "", + contentType: "text/html; charset=utf-8", + }, + { + name: "Proxying 500 from server", + host: "group.gitlab-example.com", + path: "/-/project/-/jobs/1/artifacts/500.html", + status: http.StatusInternalServerError, + binaryOption: "", + content: "", + length: 0, + cacheControl: "", + contentType: "text/html; charset=utf-8", + }, + } + + // Ensure the IP address is used in the URL, as we're relying on IP SANs to + // validate + artifactServerURL := testServer.URL + "/api/v4" + t.Log("Artifact server URL", artifactServerURL) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + teardown := RunPagesProcessWithSSLCertFile( + t, + *pagesBinary, + listeners, + "", + certFile, + "-artifacts-server="+artifactServerURL, + tt.binaryOption, + ) + defer teardown() + + resp, err := GetPageFromListener(t, httpListener, tt.host, tt.path) + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, tt.status, resp.StatusCode) + require.Equal(t, tt.contentType, resp.Header.Get("Content-Type")) + + if !((tt.status == http.StatusBadGateway) || (tt.status == http.StatusNotFound) || (tt.status == http.StatusInternalServerError)) { + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, tt.content, string(body)) + require.Equal(t, tt.length, resp.ContentLength) + require.Equal(t, tt.cacheControl, resp.Header.Get("Cache-Control")) + } + }) + } +} + +func TestPrivateArtifactProxyRequest(t *testing.T) { + skipUnlessEnabled(t, "not-inplace-chroot") + + setupTransport(t) + + testServer := makeGitLabPagesAccessStub(t) + + keyFile, certFile := CreateHTTPSFixtureFiles(t) + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + require.NoError(t, err) + defer os.Remove(keyFile) + defer os.Remove(certFile) + + testServer.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + testServer.StartTLS() + defer testServer.Close() + + tests := []struct { + name string + host string + path string + status int + binaryOption string + }{ + { + name: "basic proxied request for private project", + host: "group.gitlab-example.com", + path: "/-/private/-/jobs/1/artifacts/200.html", + status: http.StatusOK, + binaryOption: "", + }, + { + name: "basic proxied request for subgroup", + host: "group.gitlab-example.com", + path: "/-/subgroup/private/-/jobs/1/artifacts/200.html", + status: http.StatusOK, + binaryOption: "", + }, + { + name: "502 error while attempting to proxy", + host: "group.gitlab-example.com", + path: "/-/private/-/jobs/1/artifacts/delayed_200.html", + status: http.StatusBadGateway, + binaryOption: "artifacts-server-timeout=1", + }, + { + name: "Proxying 404 from server", + host: "group.gitlab-example.com", + path: "/-/private/-/jobs/1/artifacts/404.html", + status: http.StatusNotFound, + binaryOption: "", + }, + { + name: "Proxying 500 from server", + host: "group.gitlab-example.com", + path: "/-/private/-/jobs/1/artifacts/500.html", + status: http.StatusInternalServerError, + binaryOption: "", + }, + } + + // Ensure the IP address is used in the URL, as we're relying on IP SANs to + // validate + artifactServerURL := testServer.URL + "/api/v4" + t.Log("Artifact server URL", artifactServerURL) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + configFile, cleanup := defaultConfigFileWith(t, + "artifacts-server="+artifactServerURL, + "auth-server="+testServer.URL, + "auth-redirect-uri=https://projects.gitlab-example.com/auth", + tt.binaryOption) + defer cleanup() + + teardown := RunPagesProcessWithSSLCertFile( + t, + *pagesBinary, + listeners, + "", + certFile, + "-config="+configFile, + ) + defer teardown() + + resp, err := GetRedirectPage(t, httpsListener, tt.host, tt.path) + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusFound, resp.StatusCode) + + cookie := resp.Header.Get("Set-Cookie") + + // Redirects to the projects under gitlab pages domain for authentication flow + url, err := url.Parse(resp.Header.Get("Location")) + require.NoError(t, err) + require.Equal(t, "projects.gitlab-example.com", url.Host) + require.Equal(t, "/auth", url.Path) + state := url.Query().Get("state") + + resp, err = GetRedirectPage(t, httpsListener, url.Host, url.Path+"?"+url.RawQuery) + + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusFound, resp.StatusCode) + pagesDomainCookie := resp.Header.Get("Set-Cookie") + + // Go to auth page with correct state will cause fetching the token + authrsp, err := GetRedirectPageWithCookie(t, httpsListener, "projects.gitlab-example.com", "/auth?code=1&state="+ + state, pagesDomainCookie) + + require.NoError(t, err) + defer authrsp.Body.Close() + + // Will redirect auth callback to correct host + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + require.Equal(t, tt.host, url.Host) + require.Equal(t, "/auth", url.Path) + + // Request auth callback in project domain + authrsp, err = GetRedirectPageWithCookie(t, httpsListener, url.Host, url.Path+"?"+url.RawQuery, cookie) + require.NoError(t, err) + + // server returns the ticket, user will be redirected to the project page + require.Equal(t, http.StatusFound, authrsp.StatusCode) + cookie = authrsp.Header.Get("Set-Cookie") + resp, err = GetRedirectPageWithCookie(t, httpsListener, tt.host, tt.path, cookie) + + require.Equal(t, tt.status, resp.StatusCode) + + require.NoError(t, err) + defer resp.Body.Close() + }) + } +} diff --git a/test/acceptance/auth_test.go b/test/acceptance/auth_test.go new file mode 100644 index 000000000..fa2d768d8 --- /dev/null +++ b/test/acceptance/auth_test.go @@ -0,0 +1,730 @@ +package acceptance_test + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestWhenAuthIsDisabledPrivateIsNotAccessible(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "group.auth.gitlab-example.com", "private.project/") + + require.NoError(t, err) + rsp.Body.Close() + require.Equal(t, http.StatusInternalServerError, rsp.StatusCode) +} + +func TestWhenAuthIsEnabledPrivateWillRedirectToAuthorize(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithAuth(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpsListener, "group.auth.gitlab-example.com", "private.project/") + + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusFound, rsp.StatusCode) + require.Equal(t, 1, len(rsp.Header["Location"])) + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + rsp, err = GetRedirectPage(t, httpsListener, url.Host, url.Path+"?"+url.RawQuery) + require.NoError(t, err) + + require.Equal(t, http.StatusFound, rsp.StatusCode) + require.Equal(t, 1, len(rsp.Header["Location"])) + + url, err = url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + require.Equal(t, "https", url.Scheme) + require.Equal(t, "gitlab-auth.com", url.Host) + require.Equal(t, "/oauth/authorize", url.Path) + require.Equal(t, "clientID", url.Query().Get("client_id")) + require.Equal(t, "https://projects.gitlab-example.com/auth", url.Query().Get("redirect_uri")) + require.NotEqual(t, "", url.Query().Get("state")) +} + +func TestWhenAuthDeniedWillCauseUnauthorized(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithAuth(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpsListener, "projects.gitlab-example.com", "/auth?error=access_denied") + + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusUnauthorized, rsp.StatusCode) +} +func TestWhenLoginCallbackWithWrongStateShouldFail(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithAuth(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpsListener, "group.auth.gitlab-example.com", "private.project/") + + require.NoError(t, err) + defer rsp.Body.Close() + + // Go to auth page with wrong state will cause failure + authrsp, err := GetPageFromListener(t, httpsListener, "projects.gitlab-example.com", "/auth?code=0&state=0") + + require.NoError(t, err) + defer authrsp.Body.Close() + + require.Equal(t, http.StatusUnauthorized, authrsp.StatusCode) +} + +func TestWhenLoginCallbackWithUnencryptedCode(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithAuth(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpsListener, "group.auth.gitlab-example.com", "private.project/") + + require.NoError(t, err) + defer rsp.Body.Close() + + cookie := rsp.Header.Get("Set-Cookie") + + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + // Go to auth page with correct state will cause fetching the token + authrsp, err := GetPageFromListenerWithCookie(t, httpsListener, "projects.gitlab-example.com", "/auth?code=1&state="+ + url.Query().Get("state"), cookie) + + require.NoError(t, err) + defer authrsp.Body.Close() + + // Will cause 500 because the code is not encrypted + require.Equal(t, http.StatusInternalServerError, authrsp.StatusCode) +} + +func handleAccessControlArtifactRequests(t *testing.T, w http.ResponseWriter, r *http.Request) bool { + authorization := r.Header.Get("Authorization") + + switch { + case regexp.MustCompile(`/api/v4/projects/group/private/jobs/\d+/artifacts/delayed_200.html`).MatchString(r.URL.Path): + sleepIfAuthorized(t, authorization, w) + return true + case regexp.MustCompile(`/api/v4/projects/group/private/jobs/\d+/artifacts/404.html`).MatchString(r.URL.Path): + w.WriteHeader(http.StatusNotFound) + return true + case regexp.MustCompile(`/api/v4/projects/group/private/jobs/\d+/artifacts/500.html`).MatchString(r.URL.Path): + returnIfAuthorized(t, authorization, w, http.StatusInternalServerError) + return true + case regexp.MustCompile(`/api/v4/projects/group/private/jobs/\d+/artifacts/200.html`).MatchString(r.URL.Path): + returnIfAuthorized(t, authorization, w, http.StatusOK) + return true + case regexp.MustCompile(`/api/v4/projects/group/subgroup/private/jobs/\d+/artifacts/200.html`).MatchString(r.URL.Path): + returnIfAuthorized(t, authorization, w, http.StatusOK) + return true + default: + return false + } +} + +func handleAccessControlRequests(t *testing.T, w http.ResponseWriter, r *http.Request) { + allowedProjects := regexp.MustCompile(`/api/v4/projects/1\d{3}/pages_access`) + deniedProjects := regexp.MustCompile(`/api/v4/projects/2\d{3}/pages_access`) + invalidTokenProjects := regexp.MustCompile(`/api/v4/projects/3\d{3}/pages_access`) + + switch { + case allowedProjects.MatchString(r.URL.Path): + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusOK) + case deniedProjects.MatchString(r.URL.Path): + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusUnauthorized) + case invalidTokenProjects.MatchString(r.URL.Path): + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusUnauthorized) + fmt.Fprint(w, "{\"error\":\"invalid_token\"}") + default: + t.Logf("Unexpected r.URL.RawPath: %q", r.URL.Path) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + } +} + +func returnIfAuthorized(t *testing.T, authorization string, w http.ResponseWriter, status int) { + if authorization != "" { + require.Equal(t, "Bearer abc", authorization) + w.WriteHeader(status) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +func sleepIfAuthorized(t *testing.T, authorization string, w http.ResponseWriter) { + if authorization != "" { + require.Equal(t, "Bearer abc", authorization) + time.Sleep(2 * time.Second) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +func TestAccessControlUnderCustomDomain(t *testing.T) { + skipUnlessEnabled(t, "not-inplace-chroot") + + testServer := makeGitLabPagesAccessStub(t) + testServer.Start() + defer testServer.Close() + + teardown := RunPagesProcessWithAuthServer(t, *pagesBinary, listeners, "", testServer.URL) + defer teardown() + + tests := map[string]struct { + domain string + path string + }{ + "private_domain": { + domain: "private.domain.com", + path: "", + }, + "private_domain_with_query": { + domain: "private.domain.com", + path: "?q=test", + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + rsp, err := GetRedirectPage(t, httpListener, tt.domain, tt.path) + require.NoError(t, err) + defer rsp.Body.Close() + + cookie := rsp.Header.Get("Set-Cookie") + + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + state := url.Query().Get("state") + require.Equal(t, "http://"+tt.domain, url.Query().Get("domain")) + + pagesrsp, err := GetRedirectPage(t, httpListener, url.Host, url.Path+"?"+url.RawQuery) + require.NoError(t, err) + defer pagesrsp.Body.Close() + + pagescookie := pagesrsp.Header.Get("Set-Cookie") + + // Go to auth page with correct state will cause fetching the token + authrsp, err := GetRedirectPageWithCookie(t, httpListener, tt.domain, "/auth?code=1&state="+ + state, pagescookie) + + require.NoError(t, err) + defer authrsp.Body.Close() + + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + + // Will redirect to custom domain + require.Equal(t, tt.domain, url.Host) + code := url.Query().Get("code") + require.NotEqual(t, "1", code) + + authrsp, err = GetRedirectPageWithCookie(t, httpListener, tt.domain, "/auth?code="+code+"&state="+ + state, cookie) + + require.NoError(t, err) + defer authrsp.Body.Close() + + // Will redirect to the page + cookie = authrsp.Header.Get("Set-Cookie") + require.Equal(t, http.StatusFound, authrsp.StatusCode) + + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + + // Will redirect to custom domain + require.Equal(t, "http://"+tt.domain+"/"+tt.path, url.String()) + + // Fetch page in custom domain + authrsp, err = GetRedirectPageWithCookie(t, httpListener, tt.domain, tt.path, cookie) + require.NoError(t, err) + require.Equal(t, http.StatusOK, authrsp.StatusCode) + }) + } +} + +func TestCustomErrorPageWithAuth(t *testing.T) { + skipUnlessEnabled(t, "not-inplace-chroot") + testServer := makeGitLabPagesAccessStub(t) + testServer.Start() + defer testServer.Close() + + teardown := RunPagesProcessWithAuthServer(t, *pagesBinary, listeners, "", testServer.URL) + defer teardown() + + tests := []struct { + name string + domain string + path string + expectedErrorPage string + }{ + { + name: "private_project_authorized", + domain: "group.404.gitlab-example.com", + path: "/private_project/unknown", + expectedErrorPage: "Private custom 404 error page", + }, + { + name: "public_namespace_with_private_unauthorized_project", + domain: "group.404.gitlab-example.com", + // /private_unauthorized/config.json resolves project ID to 2000 which will cause a 401 from the mock GitLab testServer + path: "/private_unauthorized/unknown", + expectedErrorPage: "Custom 404 group page", + }, + { + name: "private_namespace_authorized", + domain: "group.auth.gitlab-example.com", + path: "/unknown", + expectedErrorPage: "group.auth.gitlab-example.com namespace custom 404", + }, + { + name: "private_namespace_with_private_project_auth_failed", + domain: "group.auth.gitlab-example.com", + // project ID is 2000 + path: "/private.project.1/unknown", + expectedErrorPage: "The page you're looking for could not be found.", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rsp, err := GetRedirectPage(t, httpListener, tt.domain, tt.path) + require.NoError(t, err) + defer rsp.Body.Close() + + cookie := rsp.Header.Get("Set-Cookie") + + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + state := url.Query().Get("state") + require.Equal(t, "http://"+tt.domain, url.Query().Get("domain")) + + pagesrsp, err := GetRedirectPage(t, httpListener, url.Host, url.Path+"?"+url.RawQuery) + require.NoError(t, err) + defer pagesrsp.Body.Close() + + pagescookie := pagesrsp.Header.Get("Set-Cookie") + + // Go to auth page with correct state will cause fetching the token + authrsp, err := GetRedirectPageWithCookie(t, httpListener, "projects.gitlab-example.com", "/auth?code=1&state="+ + state, pagescookie) + + require.NoError(t, err) + defer authrsp.Body.Close() + + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + + // Will redirect to custom domain + require.Equal(t, tt.domain, url.Host) + // code must have changed since it's encrypted + code := url.Query().Get("code") + require.NotEqual(t, "1", code) + require.Equal(t, state, url.Query().Get("state")) + + // Run auth callback in custom domain + authrsp, err = GetRedirectPageWithCookie(t, httpListener, tt.domain, "/auth?code="+code+"&state="+ + state, cookie) + + require.NoError(t, err) + defer authrsp.Body.Close() + + // Will redirect to the page + groupCookie := authrsp.Header.Get("Set-Cookie") + require.Equal(t, http.StatusFound, authrsp.StatusCode) + + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + + // Will redirect to custom domain error page + require.Equal(t, "http://"+tt.domain+tt.path, url.String()) + + // Fetch page in custom domain + anotherResp, err := GetRedirectPageWithCookie(t, httpListener, tt.domain, tt.path, groupCookie) + require.NoError(t, err) + + require.Equal(t, http.StatusNotFound, anotherResp.StatusCode) + + page, err := ioutil.ReadAll(anotherResp.Body) + require.NoError(t, err) + require.Contains(t, string(page), tt.expectedErrorPage) + }) + } +} + +func TestAccessControlUnderCustomDomainWithHTTPSProxy(t *testing.T) { + skipUnlessEnabled(t, "not-inplace-chroot") + + testServer := makeGitLabPagesAccessStub(t) + testServer.Start() + defer testServer.Close() + + teardown := RunPagesProcessWithAuthServer(t, *pagesBinary, listeners, "", testServer.URL) + defer teardown() + + rsp, err := GetProxyRedirectPageWithCookie(t, proxyListener, "private.domain.com", "/", "", true) + require.NoError(t, err) + defer rsp.Body.Close() + + cookie := rsp.Header.Get("Set-Cookie") + + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + state := url.Query().Get("state") + require.Equal(t, url.Query().Get("domain"), "https://private.domain.com") + pagesrsp, err := GetProxyRedirectPageWithCookie(t, proxyListener, url.Host, url.Path+"?"+url.RawQuery, "", true) + require.NoError(t, err) + defer pagesrsp.Body.Close() + + pagescookie := pagesrsp.Header.Get("Set-Cookie") + + // Go to auth page with correct state will cause fetching the token + authrsp, err := GetProxyRedirectPageWithCookie(t, proxyListener, + "projects.gitlab-example.com", "/auth?code=1&state="+state, + pagescookie, true) + + require.NoError(t, err) + defer authrsp.Body.Close() + + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + + // Will redirect to custom domain + require.Equal(t, "private.domain.com", url.Host) + // code must have changed since it's encrypted + code := url.Query().Get("code") + require.NotEqual(t, "1", code) + require.Equal(t, state, url.Query().Get("state")) + + // Run auth callback in custom domain + authrsp, err = GetProxyRedirectPageWithCookie(t, proxyListener, "private.domain.com", + "/auth?code="+code+"&state="+state, cookie, true) + + require.NoError(t, err) + defer authrsp.Body.Close() + + // Will redirect to the page + cookie = authrsp.Header.Get("Set-Cookie") + require.Equal(t, http.StatusFound, authrsp.StatusCode) + + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + + // Will redirect to custom domain + require.Equal(t, "https://private.domain.com/", url.String()) + // Fetch page in custom domain + authrsp, err = GetProxyRedirectPageWithCookie(t, proxyListener, "private.domain.com", "/", + cookie, true) + require.NoError(t, err) + require.Equal(t, http.StatusOK, authrsp.StatusCode) +} + +func TestAccessControlGroupDomain404RedirectsAuth(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithAuth(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpListener, "group.gitlab-example.com", "/nonexistent/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusFound, rsp.StatusCode) + // Redirects to the projects under gitlab pages domain for authentication flow + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + require.Equal(t, "projects.gitlab-example.com", url.Host) + require.Equal(t, "/auth", url.Path) +} +func TestAccessControlProject404DoesNotRedirect(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithAuth(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpListener, "group.gitlab-example.com", "/project/nonexistent/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusNotFound, rsp.StatusCode) +} + +func setupTransport(t *testing.T) { + transport := (TestHTTPSClient.Transport).(*http.Transport) + defer func(t time.Duration) { + transport.ResponseHeaderTimeout = t + }(transport.ResponseHeaderTimeout) + transport.ResponseHeaderTimeout = 5 * time.Second +} + +type runPagesFunc func(t *testing.T, pagesPath string, listeners []ListenSpec, promPort string, sslCertFile string, authServer string) func() + +func testAccessControl(t *testing.T, runPages runPagesFunc) { + skipUnlessEnabled(t, "not-inplace-chroot") + + setupTransport(t) + + keyFile, certFile := CreateHTTPSFixtureFiles(t) + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + require.NoError(t, err) + defer os.Remove(keyFile) + defer os.Remove(certFile) + + testServer := makeGitLabPagesAccessStub(t) + testServer.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + testServer.StartTLS() + defer testServer.Close() + + tests := []struct { + host string + path string + status int + redirectBack bool + name string + }{ + { + name: "project with access", + host: "group.auth.gitlab-example.com", + path: "/private.project/", + status: http.StatusOK, + redirectBack: false, + }, + { + name: "project without access", + host: "group.auth.gitlab-example.com", + path: "/private.project.1/", + status: http.StatusNotFound, // Do not expose project existed + redirectBack: false, + }, + { + name: "invalid token test should redirect back", + host: "group.auth.gitlab-example.com", + path: "/private.project.2/", + status: http.StatusFound, + redirectBack: true, + }, + { + name: "no project should redirect to login and then return 404", + host: "group.auth.gitlab-example.com", + path: "/nonexistent/", + status: http.StatusNotFound, + redirectBack: false, + }, + { + name: "no project should redirect to login and then return 404", + host: "nonexistent.gitlab-example.com", + path: "/nonexistent/", + status: http.StatusNotFound, + redirectBack: false, + }, // subgroups + { + name: "[subgroup] project with access", + host: "group.auth.gitlab-example.com", + path: "/subgroup/private.project/", + status: http.StatusOK, + redirectBack: false, + }, + { + name: "[subgroup] project without access", + host: "group.auth.gitlab-example.com", + path: "/subgroup/private.project.1/", + status: http.StatusNotFound, // Do not expose project existed + redirectBack: false, + }, + { + name: "[subgroup] invalid token test should redirect back", + host: "group.auth.gitlab-example.com", + path: "/subgroup/private.project.2/", + status: http.StatusFound, + redirectBack: true, + }, + { + name: "[subgroup] no project should redirect to login and then return 404", + host: "group.auth.gitlab-example.com", + path: "/subgroup/nonexistent/", + status: http.StatusNotFound, + redirectBack: false, + }, + { + name: "[subgroup] no project should redirect to login and then return 404", + host: "nonexistent.gitlab-example.com", + path: "/subgroup/nonexistent/", + status: http.StatusNotFound, + redirectBack: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + teardown := runPages(t, *pagesBinary, listeners, "", certFile, testServer.URL) + defer teardown() + + rsp, err := GetRedirectPage(t, httpsListener, tt.host, tt.path) + + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusFound, rsp.StatusCode) + cookie := rsp.Header.Get("Set-Cookie") + + // Redirects to the projects under gitlab pages domain for authentication flow + url, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + require.Equal(t, "projects.gitlab-example.com", url.Host) + require.Equal(t, "/auth", url.Path) + state := url.Query().Get("state") + + rsp, err = GetRedirectPage(t, httpsListener, url.Host, url.Path+"?"+url.RawQuery) + + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusFound, rsp.StatusCode) + pagesDomainCookie := rsp.Header.Get("Set-Cookie") + + // Go to auth page with correct state will cause fetching the token + authrsp, err := GetRedirectPageWithCookie(t, httpsListener, "projects.gitlab-example.com", "/auth?code=1&state="+ + state, pagesDomainCookie) + + require.NoError(t, err) + defer authrsp.Body.Close() + + // Will redirect auth callback to correct host + url, err = url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + require.Equal(t, tt.host, url.Host) + require.Equal(t, "/auth", url.Path) + + // Request auth callback in project domain + authrsp, err = GetRedirectPageWithCookie(t, httpsListener, url.Host, url.Path+"?"+url.RawQuery, cookie) + require.NoError(t, err) + + // server returns the ticket, user will be redirected to the project page + require.Equal(t, http.StatusFound, authrsp.StatusCode) + cookie = authrsp.Header.Get("Set-Cookie") + rsp, err = GetRedirectPageWithCookie(t, httpsListener, tt.host, tt.path, cookie) + + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, tt.status, rsp.StatusCode) + require.Equal(t, "", rsp.Header.Get("Cache-Control")) + + if tt.redirectBack { + url, err = url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + require.Equal(t, "https", url.Scheme) + require.Equal(t, tt.host, url.Host) + require.Equal(t, tt.path, url.Path) + } + }) + } +} + +func TestAccessControlWithSSLCertFile(t *testing.T) { + testAccessControl(t, RunPagesProcessWithAuthServerWithSSLCertFile) +} + +func TestAccessControlWithSSLCertDir(t *testing.T) { + testAccessControl(t, RunPagesProcessWithAuthServerWithSSLCertDir) +} + +// This proves the fix for https://gitlab.com/gitlab-org/gitlab-pages/-/issues/262 +// Read the issue description if any changes to internal/auth/ break this test. +// Related to https://tools.ietf.org/html/rfc6749#section-10.6. +func TestHijackedCode(t *testing.T) { + skipUnlessEnabled(t, "not-inplace-chroot") + + testServer := makeGitLabPagesAccessStub(t) + testServer.Start() + defer testServer.Close() + + teardown := RunPagesProcessWithAuthServer(t, *pagesBinary, listeners, "", testServer.URL) + defer teardown() + + /****ATTACKER******/ + // get valid cookie for a different private project + targetDomain := "private.domain.com" + attackersDomain := "group.auth.gitlab-example.com" + attackerCookie, attackerState := getValidCookieAndState(t, targetDomain) + + /****TARGET******/ + // fool target to click on modified URL with attacker's domain for redirect with a valid state + hackedURL := fmt.Sprintf("/auth?domain=http://%s&state=%s", attackersDomain, "irrelevant") + maliciousResp, err := GetProxyRedirectPageWithCookie(t, proxyListener, "projects.gitlab-example.com", hackedURL, "", true) + require.NoError(t, err) + defer maliciousResp.Body.Close() + + pagesCookie := maliciousResp.Header.Get("Set-Cookie") + + /* + OAuth flow happens here... + */ + maliciousRespURL, err := url.Parse(maliciousResp.Header.Get("Location")) + require.NoError(t, err) + maliciousState := maliciousRespURL.Query().Get("state") + + // Go to auth page with correct state and code "obtained" from GitLab + authrsp, err := GetProxyRedirectPageWithCookie(t, proxyListener, + "projects.gitlab-example.com", "/auth?code=1&state="+maliciousState, + pagesCookie, true) + + require.NoError(t, err) + defer authrsp.Body.Close() + + /****ATTACKER******/ + // Target is redirected to attacker's domain and attacker receives the proper code + require.Equal(t, http.StatusFound, authrsp.StatusCode, "should redirect to attacker's domain") + authrspURL, err := url.Parse(authrsp.Header.Get("Location")) + require.NoError(t, err) + require.Contains(t, authrspURL.String(), attackersDomain) + + // attacker's got the code + hijackedCode := authrspURL.Query().Get("code") + require.NotEmpty(t, hijackedCode) + + // attacker tries to access private pages content + impersonatingRes, err := GetProxyRedirectPageWithCookie(t, proxyListener, targetDomain, + "/auth?code="+hijackedCode+"&state="+attackerState, attackerCookie, true) + require.NoError(t, err) + defer authrsp.Body.Close() + + require.Equal(t, impersonatingRes.StatusCode, http.StatusInternalServerError, "should fail to decode code") +} + +func getValidCookieAndState(t *testing.T, domain string) (string, string) { + t.Helper() + + // follow flow to get a valid cookie + // visit https:/// + rsp, err := GetProxyRedirectPageWithCookie(t, proxyListener, domain, "/", "", true) + require.NoError(t, err) + defer rsp.Body.Close() + + cookie := rsp.Header.Get("Set-Cookie") + require.NotEmpty(t, cookie) + + redirectURL, err := url.Parse(rsp.Header.Get("Location")) + require.NoError(t, err) + + state := redirectURL.Query().Get("state") + require.NotEmpty(t, state) + + return cookie, state +} diff --git a/test/acceptance/config_test.go b/test/acceptance/config_test.go new file mode 100644 index 000000000..93e9aa22e --- /dev/null +++ b/test/acceptance/config_test.go @@ -0,0 +1,66 @@ +package acceptance_test + +import ( + "fmt" + "net" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEnvironmentVariablesConfig(t *testing.T) { + skipUnlessEnabled(t) + os.Setenv("LISTEN_HTTP", net.JoinHostPort(httpListener.Host, httpListener.Port)) + defer func() { os.Unsetenv("LISTEN_HTTP") }() + + teardown := RunPagesProcessWithoutWait(t, *pagesBinary, []ListenSpec{}, "") + defer teardown() + require.NoError(t, httpListener.WaitUntilRequestSucceeds(nil)) + + rsp, err := GetPageFromListener(t, httpListener, "group.gitlab-example.com:", "project/") + + require.NoError(t, err) + rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestMixedConfigSources(t *testing.T) { + skipUnlessEnabled(t) + os.Setenv("LISTEN_HTTP", net.JoinHostPort(httpListener.Host, httpListener.Port)) + defer func() { os.Unsetenv("LISTEN_HTTP") }() + + teardown := RunPagesProcessWithoutWait(t, *pagesBinary, []ListenSpec{httpsListener}, "") + defer teardown() + + for _, listener := range []ListenSpec{httpListener, httpsListener} { + require.NoError(t, listener.WaitUntilRequestSucceeds(nil)) + rsp, err := GetPageFromListener(t, listener, "group.gitlab-example.com", "project/") + require.NoError(t, err) + rsp.Body.Close() + + require.Equal(t, http.StatusOK, rsp.StatusCode) + } +} + +func TestMultiFlagEnvironmentVariables(t *testing.T) { + skipUnlessEnabled(t) + listenSpecs := []ListenSpec{{"http", "127.0.0.1", "37001"}, {"http", "127.0.0.1", "37002"}} + envVarValue := fmt.Sprintf("%s,%s", net.JoinHostPort("127.0.0.1", "37001"), net.JoinHostPort("127.0.0.1", "37002")) + + os.Setenv("LISTEN_HTTP", envVarValue) + defer func() { os.Unsetenv("LISTEN_HTTP") }() + + teardown := RunPagesProcess(t, *pagesBinary, []ListenSpec{}, "") + defer teardown() + + for _, listener := range listenSpecs { + require.NoError(t, listener.WaitUntilRequestSucceeds(nil)) + rsp, err := GetPageFromListener(t, listener, "group.gitlab-example.com", "project/") + + require.NoError(t, err) + rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) + } +} diff --git a/test/acceptance/encodings_test.go b/test/acceptance/encodings_test.go new file mode 100644 index 000000000..9b8742053 --- /dev/null +++ b/test/acceptance/encodings_test.go @@ -0,0 +1,78 @@ +package acceptance_test + +import ( + "mime" + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMIMETypes(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithoutWait(t, *pagesBinary, listeners, "") + defer teardown() + + require.NoError(t, httpListener.WaitUntilRequestSucceeds(nil)) + + tests := map[string]struct { + file string + expectedContentType string + }{ + "manifest_json": { + file: "file.webmanifest", + expectedContentType: "application/manifest+json", + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + rsp, err := GetPageFromListener(t, httpListener, "group.gitlab-example.com", "project/"+tt.file) + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusOK, rsp.StatusCode) + mt, _, err := mime.ParseMediaType(rsp.Header.Get("Content-Type")) + require.NoError(t, err) + require.Equal(t, tt.expectedContentType, mt) + }) + } +} + +func TestCompressedEncoding(t *testing.T) { + skipUnlessEnabled(t) + + tests := []struct { + name string + host string + path string + encoding string + }{ + { + "gzip encoding", + "group.gitlab-example.com", + "index.html", + "gzip", + }, + { + "brotli encoding", + "group.gitlab-example.com", + "index.html", + "br", + }, + } + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rsp, err := GetCompressedPageFromListener(t, httpListener, "group.gitlab-example.com", "index.html", tt.encoding) + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusOK, rsp.StatusCode) + require.Equal(t, tt.encoding, rsp.Header.Get("Content-Encoding")) + }) + } +} diff --git a/test/acceptance/helpers_test.go b/test/acceptance/helpers_test.go new file mode 100644 index 000000000..d228f787b --- /dev/null +++ b/test/acceptance/helpers_test.go @@ -0,0 +1,631 @@ +package acceptance_test + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path" + "strings" + "sync" + "testing" + "time" + + proxyproto "github.com/pires/go-proxyproto" + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/request" +) + +// The HTTPS certificate isn't signed by anyone. This http client is set up +// so it can talk to servers using it. +var ( + // The HTTPS certificate isn't signed by anyone. This http client is set up + // so it can talk to servers using it. + TestHTTPSClient = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: TestCertPool}, + }, + } + + // Use HTTP with a very short timeout to repeatedly check for the server to be + // up. Again, ignore HTTP + QuickTimeoutHTTPSClient = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: TestCertPool}, + ResponseHeaderTimeout: 100 * time.Millisecond, + }, + } + + // Proxyv2 client + TestProxyv2Client = &http.Client{ + Transport: &http.Transport{ + DialContext: Proxyv2DialContext, + TLSClientConfig: &tls.Config{RootCAs: TestCertPool}, + }, + } + + QuickTimeoutProxyv2Client = &http.Client{ + Transport: &http.Transport{ + DialContext: Proxyv2DialContext, + TLSClientConfig: &tls.Config{RootCAs: TestCertPool}, + ResponseHeaderTimeout: 100 * time.Millisecond, + }, + } + + TestCertPool = x509.NewCertPool() + + // Proxyv2 will create a dummy request with src 10.1.1.1:1000 + // and dst 20.2.2.2:2000 + Proxyv2DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + + conn, err := d.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + + header := &proxyproto.Header{ + Version: 2, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddress: net.ParseIP("10.1.1.1"), + SourcePort: 1000, + DestinationAddress: net.ParseIP("20.2.2.2"), + DestinationPort: 2000, + } + + _, err = header.WriteTo(conn) + + return conn, err + } + + existingAcmeTokenPath = "/.well-known/acme-challenge/existingtoken" + notExistingAcmeTokenPath = "/.well-known/acme-challenge/notexistingtoken" +) + +type tWriter struct { + t *testing.T +} + +func (t *tWriter) Write(b []byte) (int, error) { + t.t.Log(string(bytes.TrimRight(b, "\r\n"))) + + return len(b), nil +} + +type LogCaptureBuffer struct { + b bytes.Buffer + m sync.Mutex +} + +func (b *LogCaptureBuffer) Read(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + return b.b.Read(p) +} +func (b *LogCaptureBuffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + return b.b.Write(p) +} +func (b *LogCaptureBuffer) String() string { + b.m.Lock() + defer b.m.Unlock() + + return b.b.String() +} +func (b *LogCaptureBuffer) Reset() { + b.m.Lock() + defer b.m.Unlock() + + b.b.Reset() +} + +// ListenSpec is used to point at a gitlab-pages http server, preserving the +// type of port it is (http, https, proxy) +type ListenSpec struct { + Type string + Host string + Port string +} + +func (l ListenSpec) URL(suffix string) string { + scheme := request.SchemeHTTP + if l.Type == request.SchemeHTTPS || l.Type == "https-proxyv2" { + scheme = request.SchemeHTTPS + } + + suffix = strings.TrimPrefix(suffix, "/") + + return fmt.Sprintf("%s://%s/%s", scheme, l.JoinHostPort(), suffix) +} + +// Returns only once this spec points at a working TCP server +func (l ListenSpec) WaitUntilRequestSucceeds(done chan struct{}) error { + timeout := 5 * time.Second + for start := time.Now(); time.Since(start) < timeout; { + select { + case <-done: + return fmt.Errorf("server has shut down already") + default: + } + + req, err := http.NewRequest("GET", l.URL("/"), nil) + if err != nil { + return err + } + + client := QuickTimeoutHTTPSClient + if l.Type == "https-proxyv2" { + client = QuickTimeoutProxyv2Client + } + + response, err := client.Transport.RoundTrip(req) + if err != nil { + time.Sleep(100 * time.Millisecond) + continue + } + response.Body.Close() + + if code := response.StatusCode; code >= 200 && code < 500 { + return nil + } + + time.Sleep(100 * time.Millisecond) + } + + return fmt.Errorf("timed out after %v waiting for listener %v", timeout, l) +} + +func (l ListenSpec) JoinHostPort() string { + return net.JoinHostPort(l.Host, l.Port) +} + +// RunPagesProcess will start a gitlab-pages process with the specified listeners +// and return a function you can call to shut it down again. Use +// GetPageFromProcess to do a HTTP GET against a listener. +// +// If run as root via sudo, the gitlab-pages process will drop privileges +func RunPagesProcess(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, extraArgs ...string) (teardown func()) { + _, cleanup := runPagesProcess(t, true, pagesBinary, listeners, promPort, nil, extraArgs...) + return cleanup +} + +func RunPagesProcessWithoutWait(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, extraArgs ...string) (teardown func()) { + _, cleanup := runPagesProcess(t, false, pagesBinary, listeners, promPort, nil, extraArgs...) + return cleanup +} + +func RunPagesProcessWithSSLCertFile(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, sslCertFile string, extraArgs ...string) (teardown func()) { + _, cleanup := runPagesProcess(t, true, pagesBinary, listeners, promPort, []string{"SSL_CERT_FILE=" + sslCertFile}, extraArgs...) + return cleanup +} + +func RunPagesProcessWithEnvs(t *testing.T, wait bool, pagesBinary string, listeners []ListenSpec, promPort string, envs []string, extraArgs ...string) (teardown func()) { + _, cleanup := runPagesProcess(t, wait, pagesBinary, listeners, promPort, envs, extraArgs...) + return cleanup +} + +func RunPagesProcessWithOutput(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, extraArgs ...string) (out *LogCaptureBuffer, teardown func()) { + return runPagesProcess(t, true, pagesBinary, listeners, promPort, nil, extraArgs...) +} + +func RunPagesProcessWithStubGitLabServer(t *testing.T, wait bool, pagesBinary string, listeners []ListenSpec, promPort string, envs []string, extraArgs ...string) (teardown func()) { + var apiCalled bool + source := NewGitlabDomainsSourceStub(t, &apiCalled, 0) + + gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) + pagesArgs := append([]string{"-gitlab-server", source.URL, "-api-secret-key", gitLabAPISecretKey, "-domain-config-source", "gitlab"}, extraArgs...) + + _, cleanup := runPagesProcess(t, wait, pagesBinary, listeners, promPort, envs, pagesArgs...) + + return func() { + source.Close() + cleanup() + } +} + +func RunPagesProcessWithAuth(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string) func() { + configFile, cleanup := defaultConfigFileWith(t, + "auth-server=https://gitlab-auth.com", + "auth-redirect-uri=https://projects.gitlab-example.com/auth") + defer cleanup() + + _, cleanup2 := runPagesProcess(t, true, pagesBinary, listeners, promPort, nil, + "-config="+configFile, + ) + return cleanup2 +} + +func RunPagesProcessWithAuthServer(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, authServer string) func() { + return runPagesProcessWithAuthServer(t, pagesBinary, listeners, promPort, nil, authServer) +} + +func RunPagesProcessWithAuthServerWithSSLCertFile(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, sslCertFile string, authServer string) func() { + return runPagesProcessWithAuthServer(t, pagesBinary, listeners, promPort, + []string{"SSL_CERT_FILE=" + sslCertFile}, authServer) +} + +func RunPagesProcessWithAuthServerWithSSLCertDir(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, sslCertFile string, authServer string) func() { + // Create temporary cert dir + sslCertDir, err := ioutil.TempDir("", "pages-test-SSL_CERT_DIR") + require.NoError(t, err) + + // Copy sslCertFile into temp cert dir + err = copyFile(sslCertDir+"/"+path.Base(sslCertFile), sslCertFile) + require.NoError(t, err) + + innerCleanup := runPagesProcessWithAuthServer(t, pagesBinary, listeners, promPort, + []string{"SSL_CERT_DIR=" + sslCertDir}, authServer) + + return func() { + innerCleanup() + os.RemoveAll(sslCertDir) + } +} + +func runPagesProcessWithAuthServer(t *testing.T, pagesBinary string, listeners []ListenSpec, promPort string, extraEnv []string, authServer string) func() { + configFile, cleanup := defaultConfigFileWith(t, + "auth-server="+authServer, + "auth-redirect-uri=https://projects.gitlab-example.com/auth") + defer cleanup() + + _, cleanup2 := runPagesProcess(t, true, pagesBinary, listeners, promPort, extraEnv, + "-config="+configFile) + return cleanup2 +} + +func runPagesProcess(t *testing.T, wait bool, pagesBinary string, listeners []ListenSpec, promPort string, extraEnv []string, extraArgs ...string) (*LogCaptureBuffer, func()) { + t.Helper() + + _, err := os.Stat(pagesBinary) + require.NoError(t, err) + + logBuf := &LogCaptureBuffer{} + out := io.MultiWriter(&tWriter{t}, logBuf) + + args, tempfiles := getPagesArgs(t, listeners, promPort, extraArgs) + cmd := exec.Command(pagesBinary, args...) + cmd.Env = append(os.Environ(), extraEnv...) + cmd.Stdout = out + cmd.Stderr = out + require.NoError(t, cmd.Start()) + t.Logf("Running %s %v", pagesBinary, args) + + waitCh := make(chan struct{}) + go func() { + cmd.Wait() + for _, tempfile := range tempfiles { + os.Remove(tempfile) + } + close(waitCh) + }() + + cleanup := func() { + cmd.Process.Signal(os.Interrupt) + <-waitCh + } + + if wait { + for _, spec := range listeners { + if err := spec.WaitUntilRequestSucceeds(waitCh); err != nil { + cleanup() + t.Fatal(err) + } + } + } + + return logBuf, cleanup +} + +func getPagesArgs(t *testing.T, listeners []ListenSpec, promPort string, extraArgs []string) (args, tempfiles []string) { + var hasHTTPS bool + + args = append(args, "-log-verbose=true") + + for _, spec := range listeners { + args = append(args, "-listen-"+spec.Type, spec.JoinHostPort()) + + if spec.Type == request.SchemeHTTPS { + hasHTTPS = true + } + } + + if hasHTTPS { + key, cert := CreateHTTPSFixtureFiles(t) + tempfiles = []string{key, cert} + args = append(args, "-root-key", key, "-root-cert", cert) + } + + if !contains(args, "pages-root") { + args = append(args, "-pages-root", "../../shared/pages") + } + + if promPort != "" { + args = append(args, "-metrics-address", promPort) + } + + args = append(args, getPagesDaemonArgs(t)...) + args = append(args, extraArgs...) + + return +} + +func contains(slice []string, s string) bool { + for _, e := range slice { + if e == s { + return true + } + } + return false +} + +func getPagesDaemonArgs(t *testing.T) []string { + mode := os.Getenv("TEST_DAEMONIZE") + if mode == "" { + return nil + } + + if os.Geteuid() != 0 { + t.Log("Privilege-dropping requested but not running as root!") + t.FailNow() + return nil + } + + out := []string{} + + switch mode { + case "tmpdir": + out = append(out, "-daemon-inplace-chroot=false") + case "inplace": + out = append(out, "-daemon-inplace-chroot=true") + default: + t.Log("Unknown daemonize mode", mode) + t.FailNow() + return nil + } + + t.Log("Running pages as a daemon") + + // This triggers the drop-privileges-and-chroot code in the pages daemon + out = append(out, "-daemon-uid", "0") + out = append(out, "-daemon-gid", "65534") + + return out +} + +// Does a HTTP(S) GET against the listener specified, setting a fake +// Host: and constructing the URL from the listener and the URL suffix. +func GetPageFromListener(t *testing.T, spec ListenSpec, host, urlsuffix string) (*http.Response, error) { + return GetPageFromListenerWithCookie(t, spec, host, urlsuffix, "") +} + +func GetPageFromListenerWithCookie(t *testing.T, spec ListenSpec, host, urlsuffix string, cookie string) (*http.Response, error) { + url := spec.URL(urlsuffix) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if cookie != "" { + req.Header.Set("Cookie", cookie) + } + + req.Host = host + + return DoPagesRequest(t, spec, req) +} + +func GetCompressedPageFromListener(t *testing.T, spec ListenSpec, host, urlsuffix string, encoding string) (*http.Response, error) { + url := spec.URL(urlsuffix) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Host = host + req.Header.Set("Accept-Encoding", encoding) + + return DoPagesRequest(t, spec, req) +} + +func GetProxiedPageFromListener(t *testing.T, spec ListenSpec, host, xForwardedHost, urlsuffix string) (*http.Response, error) { + url := spec.URL(urlsuffix) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + req.Host = host + req.Header.Set("X-Forwarded-Host", xForwardedHost) + + return DoPagesRequest(t, spec, req) +} + +func DoPagesRequest(t *testing.T, spec ListenSpec, req *http.Request) (*http.Response, error) { + t.Logf("curl -X %s -H'Host: %s' %s", req.Method, req.Host, req.URL) + + if spec.Type == "https-proxyv2" { + return TestProxyv2Client.Do(req) + } + + return TestHTTPSClient.Do(req) +} + +func GetRedirectPage(t *testing.T, spec ListenSpec, host, urlsuffix string) (*http.Response, error) { + return GetRedirectPageWithCookie(t, spec, host, urlsuffix, "") +} + +func GetProxyRedirectPageWithCookie(t *testing.T, spec ListenSpec, host string, urlsuffix string, cookie string, https bool) (*http.Response, error) { + schema := request.SchemeHTTP + if https { + schema = request.SchemeHTTPS + } + header := http.Header{ + "X-Forwarded-Proto": []string{schema}, + "X-Forwarded-Host": []string{host}, + "cookie": []string{cookie}, + } + + return GetRedirectPageWithHeaders(t, spec, host, urlsuffix, header) +} + +func GetRedirectPageWithCookie(t *testing.T, spec ListenSpec, host, urlsuffix string, cookie string) (*http.Response, error) { + return GetRedirectPageWithHeaders(t, spec, host, urlsuffix, http.Header{"cookie": []string{cookie}}) +} + +func GetRedirectPageWithHeaders(t *testing.T, spec ListenSpec, host, urlsuffix string, header http.Header) (*http.Response, error) { + url := spec.URL(urlsuffix) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header = header + + req.Host = host + + if spec.Type == "https-proxyv2" { + return TestProxyv2Client.Transport.RoundTrip(req) + } + + return TestHTTPSClient.Transport.RoundTrip(req) +} + +func ClientWithConfig(tlsConfig *tls.Config) (*http.Client, func()) { + tlsConfig.RootCAs = TestCertPool + tr := &http.Transport{TLSClientConfig: tlsConfig} + client := &http.Client{Transport: tr} + + return client, tr.CloseIdleConnections +} + +func waitForRoundtrips(t *testing.T, listeners []ListenSpec, timeout time.Duration) { + nListening := 0 + start := time.Now() + for _, spec := range listeners { + for time.Since(start) < timeout { + req, err := http.NewRequest("GET", spec.URL("/"), nil) + if err != nil { + t.Fatal(err) + } + + client := QuickTimeoutHTTPSClient + if spec.Type == "https-proxyv2" { + client = QuickTimeoutProxyv2Client + } + + if response, err := client.Transport.RoundTrip(req); err == nil { + nListening++ + response.Body.Close() + break + } + + time.Sleep(100 * time.Millisecond) + } + } + + require.Equal(t, len(listeners), nListening, "all listeners must be accepting TCP connections") +} + +func NewGitlabDomainsSourceStub(t *testing.T, apiCalled *bool, readyCount int) *httptest.Server { + *apiCalled = false + currentStatusCount := 0 + + mux := http.NewServeMux() + mux.HandleFunc("/api/v4/internal/pages/status", func(w http.ResponseWriter, r *http.Request) { + if currentStatusCount < readyCount { + w.WriteHeader(http.StatusBadGateway) + } + + w.WriteHeader(http.StatusNoContent) + }) + + handler := func(w http.ResponseWriter, r *http.Request) { + *apiCalled = true + domain := r.URL.Query().Get("host") + path := "../../shared/lookups/" + domain + ".json" + + fixture, err := os.Open(path) + if os.IsNotExist(err) { + w.WriteHeader(http.StatusNoContent) + + t.Logf("GitLab domain %s source stub served 204", domain) + return + } + + defer fixture.Close() + require.NoError(t, err) + + _, err = io.Copy(w, fixture) + require.NoError(t, err) + + t.Logf("GitLab domain %s source stub served lookup", domain) + } + mux.HandleFunc("/api/v4/internal/pages", handler) + + return httptest.NewServer(mux) +} + +func newConfigFile(configs ...string) (string, error) { + f, err := ioutil.TempFile(os.TempDir(), "gitlab-pages-config") + if err != nil { + return "", err + } + defer f.Close() + + for _, config := range configs { + _, err := fmt.Fprintf(f, "%s\n", config) + if err != nil { + return "", err + } + } + + return f.Name(), nil +} + +func defaultConfigFileWith(t *testing.T, configs ...string) (string, func()) { + configs = append(configs, "auth-client-id=clientID", + "auth-client-secret=clientSecret", + "auth-secret=authSecret") + + name, err := newConfigFile(configs...) + require.NoError(t, err) + + cleanup := func() { + err := os.Remove(name) + require.NoError(t, err) + } + + return name, cleanup +} + +func copyFile(dest, src string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + srcInfo, err := srcFile.Stat() + if err != nil { + return err + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_EXCL, srcInfo.Mode()) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, srcFile) + return err +} diff --git a/test/acceptance/metrics_test.go b/test/acceptance/metrics_test.go new file mode 100644 index 000000000..64cfb60ac --- /dev/null +++ b/test/acceptance/metrics_test.go @@ -0,0 +1,62 @@ +package acceptance_test + +import ( + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPrometheusMetricsCanBeScraped(t *testing.T) { + skipUnlessEnabled(t) + + _, cleanup := newZipFileServerURL(t, "../../shared/pages/group/zip.gitlab.io/public.zip") + defer cleanup() + + teardown := RunPagesProcessWithStubGitLabServer(t, true, *pagesBinary, listeners, ":42345", []string{}) + defer teardown() + + // need to call an actual resource to populate certain metrics e.g. gitlab_pages_domains_source_api_requests_total + res, err := GetPageFromListener(t, httpListener, "zip.gitlab.io", + "/symlink.html") + require.NoError(t, err) + require.Equal(t, http.StatusOK, res.StatusCode) + + resp, err := http.Get("http://localhost:42345/metrics") + require.NoError(t, err) + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Contains(t, string(body), "gitlab_pages_http_in_flight_requests 0") + // TODO: remove metrics for disk source https://gitlab.com/gitlab-org/gitlab-pages/-/issues/382 + require.Contains(t, string(body), "gitlab_pages_served_domains 0") + require.Contains(t, string(body), "gitlab_pages_domains_failed_total 0") + require.Contains(t, string(body), "gitlab_pages_domains_updated_total 0") + require.Contains(t, string(body), "gitlab_pages_last_domain_update_seconds gauge") + require.Contains(t, string(body), "gitlab_pages_domains_configuration_update_duration gauge") + // end TODO + require.Contains(t, string(body), "gitlab_pages_domains_source_cache_hit") + require.Contains(t, string(body), "gitlab_pages_domains_source_cache_miss") + require.Contains(t, string(body), "gitlab_pages_domains_source_failures_total") + require.Contains(t, string(body), "gitlab_pages_serverless_requests 0") + require.Contains(t, string(body), "gitlab_pages_serverless_latency_sum 0") + require.Contains(t, string(body), "gitlab_pages_disk_serving_file_size_bytes_sum") + require.Contains(t, string(body), "gitlab_pages_serving_time_seconds_sum") + require.Contains(t, string(body), `gitlab_pages_domains_source_api_requests_total{status_code="200"}`) + require.Contains(t, string(body), `gitlab_pages_domains_source_api_call_duration_bucket`) + require.Contains(t, string(body), `gitlab_pages_domains_source_api_trace_duration`) + // httprange + require.Contains(t, string(body), `gitlab_pages_httprange_requests_total{status_code="206"}`) + require.Contains(t, string(body), "gitlab_pages_httprange_requests_duration_bucket") + require.Contains(t, string(body), "gitlab_pages_httprange_trace_duration") + require.Contains(t, string(body), "gitlab_pages_httprange_open_requests") + // zip archives + require.Contains(t, string(body), "gitlab_pages_zip_opened") + require.Contains(t, string(body), "gitlab_pages_zip_cache_requests") + require.Contains(t, string(body), "gitlab_pages_zip_cached_entries") + require.Contains(t, string(body), "gitlab_pages_zip_archive_entries_cached") + require.Contains(t, string(body), "gitlab_pages_zip_opened_entries_count") +} diff --git a/test/acceptance/proxyv2_test.go b/test/acceptance/proxyv2_test.go new file mode 100644 index 000000000..2a42f0f1c --- /dev/null +++ b/test/acceptance/proxyv2_test.go @@ -0,0 +1,57 @@ +package acceptance_test + +import ( + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestProxyv2(t *testing.T) { + skipUnlessEnabled(t) + + logBuf, teardown := RunPagesProcessWithOutput(t, *pagesBinary, listeners, "") + defer teardown() + + // the dummy client IP 10.1.1.1 is set by TestProxyv2Client + tests := map[string]struct { + host string + urlSuffix string + expectedStatusCode int + expectedContent string + expectedLog string + }{ + "basic_proxyv2_request": { + host: "group.gitlab-example.com", + urlSuffix: "project/", + expectedStatusCode: http.StatusOK, + expectedContent: "project-subdir\n", + expectedLog: "group.gitlab-example.com 10.1.1.1", + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + logBuf.Reset() + + response, err := GetPageFromListener(t, httpsProxyv2Listener, tt.host, tt.urlSuffix) + require.NoError(t, err) + defer response.Body.Close() + + require.Equal(t, tt.expectedStatusCode, response.StatusCode) + + body, err := ioutil.ReadAll(response.Body) + require.NoError(t, err) + + require.Contains(t, string(body), tt.expectedContent, "content mismatch") + + // give the process enough time to write the log message + require.Eventually(t, func() bool { + require.Contains(t, logBuf.String(), tt.expectedLog, "log mismatch") + return true + }, time.Second, time.Millisecond) + }) + } +} diff --git a/test/acceptance/redirects_test.go b/test/acceptance/redirects_test.go new file mode 100644 index 000000000..6c564ce69 --- /dev/null +++ b/test/acceptance/redirects_test.go @@ -0,0 +1,116 @@ +package acceptance_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDisabledRedirects(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcessWithEnvs(t, true, *pagesBinary, listeners, "", []string{"FF_ENABLE_REDIRECTS=false"}) + defer teardown() + + // Test that redirects status page is forbidden + rsp, err := GetPageFromListener(t, httpListener, "group.redirects.gitlab-example.com", "/project-redirects/_redirects") + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusForbidden, rsp.StatusCode) + + // Test that redirects are disabled + rsp, err = GetRedirectPage(t, httpListener, "group.redirects.gitlab-example.com", "/project-redirects/redirect-portal.html") + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusNotFound, rsp.StatusCode) +} + +func TestRedirectStatusPage(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "group.redirects.gitlab-example.com", "/project-redirects/_redirects") + require.NoError(t, err) + + body, err := ioutil.ReadAll(rsp.Body) + require.NoError(t, err) + defer rsp.Body.Close() + + require.Contains(t, string(body), "11 rules") + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestRedirect(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + // Test that serving a file still works with redirects enabled + rsp, err := GetRedirectPage(t, httpListener, "group.redirects.gitlab-example.com", "/project-redirects/index.html") + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusOK, rsp.StatusCode) + + tests := []struct { + host string + path string + expectedStatus int + expectedLocation string + }{ + // Project domain + { + host: "group.redirects.gitlab-example.com", + path: "/project-redirects/redirect-portal.html", + expectedStatus: http.StatusFound, + expectedLocation: "/project-redirects/magic-land.html", + }, + // Make sure invalid rule does not redirect + { + host: "group.redirects.gitlab-example.com", + path: "/project-redirects/goto-domain.html", + expectedStatus: http.StatusNotFound, + expectedLocation: "", + }, + // Actual file on disk should override any redirects that match + { + host: "group.redirects.gitlab-example.com", + path: "/project-redirects/file-override.html", + expectedStatus: http.StatusOK, + expectedLocation: "", + }, + // Group-level domain + { + host: "group.redirects.gitlab-example.com", + path: "/redirect-portal.html", + expectedStatus: http.StatusFound, + expectedLocation: "/magic-land.html", + }, + // Custom domain + { + host: "redirects.custom-domain.com", + path: "/redirect-portal.html", + expectedStatus: http.StatusFound, + expectedLocation: "/magic-land.html", + }, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("%s%s -> %s (%d)", tt.host, tt.path, tt.expectedLocation, tt.expectedStatus), func(t *testing.T) { + rsp, err := GetRedirectPage(t, httpListener, tt.host, tt.path) + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, tt.expectedLocation, rsp.Header.Get("Location")) + require.Equal(t, tt.expectedStatus, rsp.StatusCode) + }) + } +} diff --git a/test/acceptance/serving_test.go b/test/acceptance/serving_test.go new file mode 100644 index 000000000..e689d5981 --- /dev/null +++ b/test/acceptance/serving_test.go @@ -0,0 +1,574 @@ +package acceptance_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "path" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestUnknownHostReturnsNotFound(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + for _, spec := range listeners { + rsp, err := GetPageFromListener(t, spec, "invalid.invalid", "") + + require.NoError(t, err) + rsp.Body.Close() + require.Equal(t, http.StatusNotFound, rsp.StatusCode) + } +} + +func TestUnknownProjectReturnsNotFound(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "group.gitlab-example.com", "/nonexistent/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusNotFound, rsp.StatusCode) +} + +func TestGroupDomainReturns200(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "group.gitlab-example.com", "/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestKnownHostReturns200(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + tests := []struct { + name string + host string + path string + }{ + { + name: "lower case", + host: "group.gitlab-example.com", + path: "project/", + }, + { + name: "capital project", + host: "group.gitlab-example.com", + path: "CapitalProject/", + }, + { + name: "capital group", + host: "CapitalGroup.gitlab-example.com", + path: "project/", + }, + { + name: "capital group and project", + host: "CapitalGroup.gitlab-example.com", + path: "CapitalProject/", + }, + { + name: "subgroup", + host: "group.gitlab-example.com", + path: "subgroup/project/", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for _, spec := range listeners { + rsp, err := GetPageFromListener(t, spec, tt.host, tt.path) + + require.NoError(t, err) + rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) + } + }) + } +} + +func TestNestedSubgroups(t *testing.T) { + skipUnlessEnabled(t) + + maxNestedSubgroup := 21 + + pagesRoot, err := ioutil.TempDir("", "pages-root") + require.NoError(t, err) + defer os.RemoveAll(pagesRoot) + + makeProjectIndex := func(subGroupPath string) { + projectPath := path.Join(pagesRoot, "nested", subGroupPath, "project", "public") + require.NoError(t, os.MkdirAll(projectPath, 0755)) + + projectIndex := path.Join(projectPath, "index.html") + require.NoError(t, ioutil.WriteFile(projectIndex, []byte("index"), 0644)) + } + makeProjectIndex("") + + paths := []string{""} + for i := 1; i < maxNestedSubgroup*2; i++ { + subGroupPath := fmt.Sprintf("%ssub%d/", paths[i-1], i) + paths = append(paths, subGroupPath) + + makeProjectIndex(subGroupPath) + } + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "-pages-root", pagesRoot) + defer teardown() + + for nestingLevel, path := range paths { + t.Run(fmt.Sprintf("nested level %d", nestingLevel), func(t *testing.T) { + for _, spec := range listeners { + rsp, err := GetPageFromListener(t, spec, "nested.gitlab-example.com", path+"project/") + + require.NoError(t, err) + rsp.Body.Close() + if nestingLevel <= maxNestedSubgroup { + require.Equal(t, http.StatusOK, rsp.StatusCode) + } else { + require.Equal(t, http.StatusNotFound, rsp.StatusCode) + } + } + }) + } +} + +func TestCustom404(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + tests := []struct { + host string + path string + content string + }{ + { + host: "group.404.gitlab-example.com", + path: "project.404/not/existing-file", + content: "Custom 404 project page", + }, + { + host: "group.404.gitlab-example.com", + path: "project.404/", + content: "Custom 404 project page", + }, + { + host: "group.404.gitlab-example.com", + path: "not/existing-file", + content: "Custom 404 group page", + }, + { + host: "group.404.gitlab-example.com", + path: "not-existing-file", + content: "Custom 404 group page", + }, + { + host: "group.404.gitlab-example.com", + content: "Custom 404 group page", + }, + { + host: "domain.404.com", + content: "Custom domain.404 page", + }, + { + host: "group.404.gitlab-example.com", + path: "project.no.404/not/existing-file", + content: "The page you're looking for could not be found.", + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s/%s", test.host, test.path), func(t *testing.T) { + for _, spec := range listeners { + rsp, err := GetPageFromListener(t, spec, test.host, test.path) + + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusNotFound, rsp.StatusCode) + + page, err := ioutil.ReadAll(rsp.Body) + require.NoError(t, err) + require.Contains(t, string(page), test.content) + } + }) + } +} + +func TestCORSWhenDisabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "-disable-cross-origin-requests") + defer teardown() + + for _, spec := range listeners { + for _, method := range []string{"GET", "OPTIONS"} { + rsp := doCrossOriginRequest(t, spec, method, method, spec.URL("project/")) + + require.Equal(t, http.StatusOK, rsp.StatusCode) + require.Equal(t, "", rsp.Header.Get("Access-Control-Allow-Origin")) + require.Equal(t, "", rsp.Header.Get("Access-Control-Allow-Credentials")) + } + } +} + +func TestCORSAllowsGET(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + for _, spec := range listeners { + for _, method := range []string{"GET", "OPTIONS"} { + rsp := doCrossOriginRequest(t, spec, method, method, spec.URL("project/")) + + require.Equal(t, http.StatusOK, rsp.StatusCode) + require.Equal(t, "*", rsp.Header.Get("Access-Control-Allow-Origin")) + require.Equal(t, "", rsp.Header.Get("Access-Control-Allow-Credentials")) + } + } +} + +func TestCORSForbidsPOST(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + for _, spec := range listeners { + rsp := doCrossOriginRequest(t, spec, "OPTIONS", "POST", spec.URL("project/")) + + require.Equal(t, http.StatusOK, rsp.StatusCode) + require.Equal(t, "", rsp.Header.Get("Access-Control-Allow-Origin")) + require.Equal(t, "", rsp.Header.Get("Access-Control-Allow-Credentials")) + } +} + +func TestCustomHeaders(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "-header", "X-Test1:Testing1", "-header", "X-Test2:Testing2") + defer teardown() + + for _, spec := range listeners { + rsp, err := GetPageFromListener(t, spec, "group.gitlab-example.com:", "project/") + require.NoError(t, err) + require.Equal(t, http.StatusOK, rsp.StatusCode) + require.Equal(t, "Testing1", rsp.Header.Get("X-Test1")) + require.Equal(t, "Testing2", rsp.Header.Get("X-Test2")) + } +} + +func TestKnownHostWithPortReturns200(t *testing.T) { + skipUnlessEnabled(t) + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + for _, spec := range listeners { + rsp, err := GetPageFromListener(t, spec, "group.gitlab-example.com:"+spec.Port, "project/") + + require.NoError(t, err) + rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) + } +} + +func TestHttpToHttpsRedirectDisabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpListener, "group.gitlab-example.com", "project/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) + + rsp, err = GetPageFromListener(t, httpsListener, "group.gitlab-example.com", "project/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestHttpToHttpsRedirectEnabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "-redirect-http=true") + defer teardown() + + rsp, err := GetRedirectPage(t, httpListener, "group.gitlab-example.com", "project/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusTemporaryRedirect, rsp.StatusCode) + require.Equal(t, 1, len(rsp.Header["Location"])) + require.Equal(t, "https://group.gitlab-example.com/project/", rsp.Header.Get("Location")) + + rsp, err = GetPageFromListener(t, httpsListener, "group.gitlab-example.com", "project/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestHttpsOnlyGroupEnabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpListener, "group.https-only.gitlab-example.com", "project1/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusMovedPermanently, rsp.StatusCode) +} + +func TestHttpsOnlyGroupDisabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "group.https-only.gitlab-example.com", "project2/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestHttpsOnlyProjectEnabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpListener, "test.my-domain.com", "/index.html") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusMovedPermanently, rsp.StatusCode) +} + +func TestHttpsOnlyProjectDisabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "test2.my-domain.com", "/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestHttpsOnlyDomainDisabled(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "no.cert.com", "/") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestDomainsSource(t *testing.T) { + skipUnlessEnabled(t) + + type args struct { + configSource string + domain string + urlSuffix string + readyCount int + } + type want struct { + statusCode int + content string + apiCalled bool + } + tests := []struct { + name string + args args + want want + }{ + { + name: "gitlab_source_domain_exists", + args: args{ + configSource: "gitlab", + domain: "new-source-test.gitlab.io", + urlSuffix: "/my/pages/project/", + }, + want: want{ + statusCode: http.StatusOK, + content: "New Pages GitLab Source TEST OK\n", + apiCalled: true, + }, + }, + { + name: "gitlab_source_domain_does_not_exist", + args: args{ + configSource: "gitlab", + domain: "non-existent-domain.gitlab.io", + }, + want: want{ + statusCode: http.StatusNotFound, + apiCalled: true, + }, + }, + { + name: "disk_source_domain_exists", + args: args{ + configSource: "disk", + // test.domain.com sourced from disk configuration + domain: "test.domain.com", + urlSuffix: "/", + }, + want: want{ + statusCode: http.StatusOK, + content: "main-dir\n", + apiCalled: false, + }, + }, + { + name: "disk_source_domain_does_not_exist", + args: args{ + configSource: "disk", + domain: "non-existent-domain.gitlab.io", + }, + want: want{ + statusCode: http.StatusNotFound, + apiCalled: false, + }, + }, + { + name: "disk_source_domain_should_not_exist_under_hashed_dir", + args: args{ + configSource: "disk", + domain: "hashed.com", + }, + want: want{ + statusCode: http.StatusNotFound, + apiCalled: false, + }, + }, + { + name: "auto_source_gitlab_is_not_ready", + args: args{ + configSource: "auto", + domain: "test.domain.com", + urlSuffix: "/", + readyCount: 100, // big number to ensure the API is in bad state for a while + }, + want: want{ + statusCode: http.StatusOK, + content: "main-dir\n", + apiCalled: false, + }, + }, + { + name: "auto_source_gitlab_is_ready", + args: args{ + configSource: "auto", + domain: "new-source-test.gitlab.io", + urlSuffix: "/my/pages/project/", + readyCount: 0, + }, + want: want{ + statusCode: http.StatusOK, + content: "New Pages GitLab Source TEST OK\n", + apiCalled: true, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var apiCalled bool + source := NewGitlabDomainsSourceStub(t, &apiCalled, tt.args.readyCount) + defer source.Close() + + gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) + + pagesArgs := []string{"-gitlab-server", source.URL, "-api-secret-key", gitLabAPISecretKey, "-domain-config-source", tt.args.configSource} + teardown := RunPagesProcessWithEnvs(t, true, *pagesBinary, listeners, "", []string{}, pagesArgs...) + defer teardown() + + response, err := GetPageFromListener(t, httpListener, tt.args.domain, tt.args.urlSuffix) + require.NoError(t, err) + + require.Equal(t, tt.want.statusCode, response.StatusCode) + if tt.want.statusCode == http.StatusOK { + defer response.Body.Close() + body, err := ioutil.ReadAll(response.Body) + require.NoError(t, err) + + require.Equal(t, tt.want.content, string(body), "content mismatch") + } + + require.Equal(t, tt.want.apiCalled, apiCalled, "api called mismatch") + }) + } +} + +func TestKnownHostInReverseProxySetupReturns200(t *testing.T) { + skipUnlessEnabled(t) + + var listeners = []ListenSpec{ + proxyListener, + // TODO: re-enable https://gitlab.com/gitlab-org/gitlab-pages/-/issues/528 + // {"proxy", "::1", "37002"}, + } + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + for _, spec := range listeners { + rsp, err := GetProxiedPageFromListener(t, spec, "localhost", "group.gitlab-example.com", "project/") + + require.NoError(t, err) + rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) + } +} + +func doCrossOriginRequest(t *testing.T, spec ListenSpec, method, reqMethod, url string) *http.Response { + req, err := http.NewRequest(method, url, nil) + require.NoError(t, err) + + req.Host = "group.gitlab-example.com" + req.Header.Add("Origin", "example.com") + req.Header.Add("Access-Control-Request-Method", reqMethod) + + var rsp *http.Response + err = fmt.Errorf("no request was made") + for start := time.Now(); time.Since(start) < 1*time.Second; { + rsp, err = DoPagesRequest(t, spec, req) + if err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + require.NoError(t, err) + + rsp.Body.Close() + return rsp +} + +func TestQueryStringPersistedInSlashRewrite(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + rsp, err := GetRedirectPage(t, httpsListener, "group.gitlab-example.com", "project?q=test") + require.NoError(t, err) + defer rsp.Body.Close() + + require.Equal(t, http.StatusFound, rsp.StatusCode) + require.Equal(t, 1, len(rsp.Header["Location"])) + require.Equal(t, "//group.gitlab-example.com/project/?q=test", rsp.Header.Get("Location")) + + rsp, err = GetPageFromListener(t, httpsListener, "group.gitlab-example.com", "project/?q=test") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} diff --git a/test/acceptance/status_test.go b/test/acceptance/status_test.go new file mode 100644 index 000000000..8e227ed80 --- /dev/null +++ b/test/acceptance/status_test.go @@ -0,0 +1,44 @@ +package acceptance_test + +import ( + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestStatusPage(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "-pages-status=/@statuscheck") + defer teardown() + + rsp, err := GetPageFromListener(t, httpListener, "group.gitlab-example.com", "@statuscheck") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusOK, rsp.StatusCode) +} + +func TestStatusNotYetReady(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithoutWait(t, *pagesBinary, listeners, "", "-pages-status=/@statuscheck", "-pages-root=../../shared/invalid-pages") + defer teardown() + + waitForRoundtrips(t, listeners, 5*time.Second) + rsp, err := GetPageFromListener(t, httpListener, "group.gitlab-example.com", "@statuscheck") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusServiceUnavailable, rsp.StatusCode) +} + +func TestPageNotAvailableIfNotLoaded(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcessWithoutWait(t, *pagesBinary, listeners, "", "-pages-root=../../shared/invalid-pages") + defer teardown() + waitForRoundtrips(t, listeners, 5*time.Second) + + rsp, err := GetPageFromListener(t, httpListener, "group.gitlab-example.com", "index.html") + require.NoError(t, err) + defer rsp.Body.Close() + require.Equal(t, http.StatusServiceUnavailable, rsp.StatusCode) +} diff --git a/test/acceptance/stub_test.go b/test/acceptance/stub_test.go new file mode 100644 index 000000000..8f52ec37a --- /dev/null +++ b/test/acceptance/stub_test.go @@ -0,0 +1,72 @@ +package acceptance_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/gitlab-org/gitlab-pages/internal/fixture" +) + +// makeGitLabPagesAccessStub provides a stub *httptest.Server to check pages_access API call. +// the result is based on the project id. +// +// Project IDs must be 4 digit long and the following rules applies: +// 1000-1999: Ok +// 2000-2999: Unauthorized +// 3000-3999: Invalid token +func makeGitLabPagesAccessStub(t *testing.T) *httptest.Server { + t.Helper() + + return httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/oauth/token": + require.Equal(t, "POST", r.Method) + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, "{\"access_token\":\"abc\"}") + case "/api/v4/user": + require.Equal(t, "Bearer abc", r.Header.Get("Authorization")) + w.WriteHeader(http.StatusOK) + default: + if handleAccessControlArtifactRequests(t, w, r) { + return + } + handleAccessControlRequests(t, w, r) + } + })) +} + +func CreateHTTPSFixtureFiles(t *testing.T) (key string, cert string) { + t.Helper() + + keyfile, err := ioutil.TempFile("", "https-fixture") + require.NoError(t, err) + key = keyfile.Name() + keyfile.Close() + + certfile, err := ioutil.TempFile("", "https-fixture") + require.NoError(t, err) + cert = certfile.Name() + certfile.Close() + + require.NoError(t, ioutil.WriteFile(key, []byte(fixture.Key), 0644)) + require.NoError(t, ioutil.WriteFile(cert, []byte(fixture.Certificate), 0644)) + + return keyfile.Name(), certfile.Name() +} + +func CreateGitLabAPISecretKeyFixtureFile(t *testing.T) (filepath string) { + t.Helper() + + secretfile, err := ioutil.TempFile("", "gitlab-api-secret") + require.NoError(t, err) + secretfile.Close() + + require.NoError(t, ioutil.WriteFile(secretfile.Name(), []byte(fixture.GitLabAPISecretKey), 0644)) + + return secretfile.Name() +} diff --git a/test/acceptance/tls_test.go b/test/acceptance/tls_test.go new file mode 100644 index 000000000..3445c6c38 --- /dev/null +++ b/test/acceptance/tls_test.go @@ -0,0 +1,130 @@ +package acceptance_test + +import ( + "crypto/tls" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAcceptsSupportedCiphers(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + tlsConfig := &tls.Config{ + CipherSuites: []uint16{ + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + }, + } + client, cleanup := ClientWithConfig(tlsConfig) + defer cleanup() + + rsp, err := client.Get(httpsListener.URL("/")) + + if rsp != nil { + rsp.Body.Close() + } + + require.NoError(t, err) +} + +func tlsConfigWithInsecureCiphersOnly() *tls.Config { + return &tls.Config{ + CipherSuites: []uint16{ + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + }, + MaxVersion: tls.VersionTLS12, // ciphers for TLS1.3 are not configurable and will work if enabled + } +} + +func TestRejectsUnsupportedCiphers(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + client, cleanup := ClientWithConfig(tlsConfigWithInsecureCiphersOnly()) + defer cleanup() + + rsp, err := client.Get(httpsListener.URL("/")) + + if rsp != nil { + rsp.Body.Close() + } + + require.Error(t, err) + require.Nil(t, rsp) +} + +func TestEnableInsecureCiphers(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", "-insecure-ciphers") + defer teardown() + + client, cleanup := ClientWithConfig(tlsConfigWithInsecureCiphersOnly()) + defer cleanup() + + rsp, err := client.Get(httpsListener.URL("/")) + + if rsp != nil { + rsp.Body.Close() + } + + require.NoError(t, err) +} + +func TestTLSVersions(t *testing.T) { + skipUnlessEnabled(t) + + tests := map[string]struct { + tlsMin string + tlsMax string + tlsClient uint16 + expectError bool + }{ + "client version not supported": {tlsMin: "tls1.1", tlsMax: "tls1.2", tlsClient: tls.VersionTLS10, expectError: true}, + "client version supported": {tlsMin: "tls1.1", tlsMax: "tls1.2", tlsClient: tls.VersionTLS12, expectError: false}, + "client and server using default settings": {tlsMin: "", tlsMax: "", tlsClient: 0, expectError: false}, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + args := []string{} + if tc.tlsMin != "" { + args = append(args, "-tls-min-version", tc.tlsMin) + } + if tc.tlsMax != "" { + args = append(args, "-tls-max-version", tc.tlsMax) + } + + teardown := RunPagesProcess(t, *pagesBinary, listeners, "", args...) + defer teardown() + + tlsConfig := &tls.Config{} + if tc.tlsClient != 0 { + tlsConfig.MinVersion = tc.tlsClient + tlsConfig.MaxVersion = tc.tlsClient + } + client, cleanup := ClientWithConfig(tlsConfig) + defer cleanup() + + rsp, err := client.Get(httpsListener.URL("/")) + + if rsp != nil { + rsp.Body.Close() + } + + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/test/acceptance/unknown_http_method_test.go b/test/acceptance/unknown_http_method_test.go new file mode 100644 index 000000000..f6c5ffee5 --- /dev/null +++ b/test/acceptance/unknown_http_method_test.go @@ -0,0 +1,23 @@ +package acceptance_test + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUnknownHTTPMethod(t *testing.T) { + skipUnlessEnabled(t) + teardown := RunPagesProcess(t, *pagesBinary, listeners, "") + defer teardown() + + req, err := http.NewRequest("UNKNOWN", listeners[0].URL(""), nil) + require.NoError(t, err) + req.Host = "" + + resp, err := DoPagesRequest(t, httpListener, req) + require.NoError(t, err) + + require.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode) +} diff --git a/test/acceptance/zip_test.go b/test/acceptance/zip_test.go new file mode 100644 index 000000000..5d3037c81 --- /dev/null +++ b/test/acceptance/zip_test.go @@ -0,0 +1,161 @@ +package acceptance_test + +import ( + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestZipServing(t *testing.T) { + skipUnlessEnabled(t) + + var apiCalled bool + source := NewGitlabDomainsSourceStub(t, &apiCalled, 0) + defer source.Close() + + gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) + + pagesArgs := []string{"-gitlab-server", source.URL, "-api-secret-key", gitLabAPISecretKey, "-domain-config-source", "gitlab"} + teardown := RunPagesProcessWithEnvs(t, true, *pagesBinary, listeners, "", []string{}, pagesArgs...) + defer teardown() + + _, cleanup := newZipFileServerURL(t, "../../shared/pages/group/zip.gitlab.io/public.zip") + defer cleanup() + + tests := map[string]struct { + host string + urlSuffix string + expectedStatusCode int + expectedContent string + }{ + "base_domain_no_suffix": { + host: "zip.gitlab.io", + urlSuffix: "/", + expectedStatusCode: http.StatusOK, + expectedContent: "zip.gitlab.io/project/index.html\n", + }, + "file_exists": { + host: "zip.gitlab.io", + urlSuffix: "/index.html", + expectedStatusCode: http.StatusOK, + expectedContent: "zip.gitlab.io/project/index.html\n", + }, + "file_exists_in_subdir": { + host: "zip.gitlab.io", + urlSuffix: "/subdir/hello.html", + expectedStatusCode: http.StatusOK, + expectedContent: "zip.gitlab.io/project/subdir/hello.html\n", + }, + "file_exists_symlink": { + host: "zip.gitlab.io", + urlSuffix: "/symlink.html", + expectedStatusCode: http.StatusOK, + expectedContent: "symlink.html->subdir/linked.html\n", + }, + "dir": { + host: "zip.gitlab.io", + urlSuffix: "/subdir/", + expectedStatusCode: http.StatusNotFound, + expectedContent: "zip.gitlab.io/project/404.html\n", + }, + "file_does_not_exist": { + host: "zip.gitlab.io", + urlSuffix: "/unknown.html", + expectedStatusCode: http.StatusNotFound, + expectedContent: "zip.gitlab.io/project/404.html\n", + }, + "bad_symlink": { + host: "zip.gitlab.io", + urlSuffix: "/bad-symlink.html", + expectedStatusCode: http.StatusNotFound, + expectedContent: "zip.gitlab.io/project/404.html\n", + }, + "with_not_found_zip": { + host: "zip-not-found.gitlab.io", + urlSuffix: "/", + expectedStatusCode: http.StatusNotFound, + expectedContent: "The page you're looking for could not be found", + }, + "with_malformed_zip": { + host: "zip-malformed.gitlab.io", + urlSuffix: "/", + expectedStatusCode: http.StatusInternalServerError, + expectedContent: "Something went wrong (500)", + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + response, err := GetPageFromListener(t, httpListener, tt.host, tt.urlSuffix) + require.NoError(t, err) + defer response.Body.Close() + + require.Equal(t, tt.expectedStatusCode, response.StatusCode) + + body, err := ioutil.ReadAll(response.Body) + require.NoError(t, err) + + require.Contains(t, string(body), tt.expectedContent, "content mismatch") + }) + } +} + +func TestZipServingConfigShortTimeout(t *testing.T) { + skipUnlessEnabled(t) + + var apiCalled bool + source := NewGitlabDomainsSourceStub(t, &apiCalled, 0) + defer source.Close() + + gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) + + pagesArgs := []string{"-gitlab-server", source.URL, "-api-secret-key", gitLabAPISecretKey, "-domain-config-source", "gitlab", + "-zip-open-timeout=1ns"} // <- test purpose + + teardown := RunPagesProcessWithEnvs(t, true, *pagesBinary, listeners, "", []string{}, pagesArgs...) + defer teardown() + + _, cleanup := newZipFileServerURL(t, "../../shared/pages/group/zip.gitlab.io/public.zip") + defer cleanup() + + response, err := GetPageFromListener(t, httpListener, "zip.gitlab.io", "/") + require.NoError(t, err) + defer response.Body.Close() + + require.Equal(t, http.StatusInternalServerError, response.StatusCode, "should fail to serve") +} + +func newZipFileServerURL(t *testing.T, zipFilePath string) (string, func()) { + t.Helper() + + m := http.NewServeMux() + m.HandleFunc("/public.zip", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.ServeFile(w, r, zipFilePath) + })) + m.HandleFunc("/malformed.zip", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + + // create a listener with the desired port. + l, err := net.Listen("tcp", objectStorageMockServer) + require.NoError(t, err) + + testServer := httptest.NewUnstartedServer(m) + + // NewUnstartedServer creates a listener. Close that listener and replace + // with the one we created. + testServer.Listener.Close() + testServer.Listener = l + + // Start the server. + testServer.Start() + + return testServer.URL, func() { + // Cleanup. + testServer.Close() + } +} diff --git a/tools.go b/tools.go new file mode 100644 index 000000000..38b719476 --- /dev/null +++ b/tools.go @@ -0,0 +1,11 @@ +//+build tools + +package main + +import ( + _ "github.com/fzipp/gocyclo" + _ "github.com/jstemmer/go-junit-report" + _ "github.com/wadey/gocovmerge" + _ "golang.org/x/lint/golint" + _ "golang.org/x/tools/cmd/goimports" +) -- GitLab From 0803e5e9f2b92dd27160151c4330bca2ae126895 Mon Sep 17 00:00:00 2001 From: Dishon Date: Wed, 10 Feb 2021 20:49:07 +0000 Subject: [PATCH 2/5] add `sleepWithContext` --- internal/source/gitlab/cache/cache_test.go | 56 ------------- internal/source/gitlab/cache/retriever.go | 94 ++++------------------ 2 files changed, 15 insertions(+), 135 deletions(-) diff --git a/internal/source/gitlab/cache/cache_test.go b/internal/source/gitlab/cache/cache_test.go index a979a8422..757adb2b7 100644 --- a/internal/source/gitlab/cache/cache_test.go +++ b/internal/source/gitlab/cache/cache_test.go @@ -13,22 +13,14 @@ import ( "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" ) -<<<<<<< HEAD -type client struct { -======= type clientMock struct { ->>>>>>> upstream/master counter uint64 lookups chan uint64 domain chan string failure error } -<<<<<<< HEAD -func (c *client) GetLookup(ctx context.Context, _ string) api.Lookup { -======= func (c *clientMock) GetLookup(ctx context.Context, _ string) api.Lookup { ->>>>>>> upstream/master lookup := api.Lookup{} if c.failure == nil { lookup.Name = <-c.domain @@ -41,19 +33,11 @@ func (c *clientMock) GetLookup(ctx context.Context, _ string) api.Lookup { return lookup } -<<<<<<< HEAD -func (c *client) Status() error { - return nil -} - -func withTestCache(config resolverConfig, cacheConfig *cacheConfig, block func(*Cache, *client)) { -======= func (c *clientMock) Status() error { return nil } func withTestCache(config resolverConfig, cacheConfig *cacheConfig, block func(*Cache, *clientMock)) { ->>>>>>> upstream/master var chanSize int if config.buffered { @@ -62,11 +46,7 @@ func withTestCache(config resolverConfig, cacheConfig *cacheConfig, block func(* chanSize = 0 } -<<<<<<< HEAD - resolver := &client{ -======= resolver := &clientMock{ ->>>>>>> upstream/master domain: make(chan string, chanSize), lookups: make(chan uint64, 100), failure: config.failure, @@ -110,11 +90,7 @@ type entryConfig struct { func TestResolve(t *testing.T) { t.Run("when item is not cached", func(t *testing.T) { -<<<<<<< HEAD - withTestCache(resolverConfig{buffered: true}, nil, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{buffered: true}, nil, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master require.Equal(t, 0, len(resolver.lookups)) resolver.domain <- "my.gitlab.com" @@ -127,11 +103,7 @@ func TestResolve(t *testing.T) { }) t.Run("when item is not cached and accessed multiple times", func(t *testing.T) { -<<<<<<< HEAD - withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master wg := &sync.WaitGroup{} ctx := context.Background() @@ -155,11 +127,7 @@ func TestResolve(t *testing.T) { }) t.Run("when item is in short cache", func(t *testing.T) { -<<<<<<< HEAD - withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master cache.withTestEntry(entryConfig{expired: false, retrieved: true}, func(*Entry) { lookup := cache.Resolve(context.Background(), "my.gitlab.com") @@ -170,11 +138,7 @@ func TestResolve(t *testing.T) { }) t.Run("when a non-retrieved new item is in short cache", func(t *testing.T) { -<<<<<<< HEAD - withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master cache.withTestEntry(entryConfig{expired: false, retrieved: false}, func(*Entry) { lookup := make(chan *api.Lookup, 1) @@ -193,11 +157,7 @@ func TestResolve(t *testing.T) { }) t.Run("when item is in long cache only", func(t *testing.T) { -<<<<<<< HEAD - withTestCache(resolverConfig{buffered: false}, nil, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{buffered: false}, nil, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master cache.withTestEntry(entryConfig{expired: true, retrieved: true}, func(*Entry) { lookup := cache.Resolve(context.Background(), "my.gitlab.com") @@ -212,11 +172,7 @@ func TestResolve(t *testing.T) { }) t.Run("when item in long cache is requested multiple times", func(t *testing.T) { -<<<<<<< HEAD - withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master cache.withTestEntry(entryConfig{expired: true, retrieved: true}, func(*Entry) { cache.Resolve(context.Background(), "my.gitlab.com") cache.Resolve(context.Background(), "my.gitlab.com") @@ -236,11 +192,7 @@ func TestResolve(t *testing.T) { cc.maxRetrievalInterval = 0 err := errors.New("500 error") -<<<<<<< HEAD - withTestCache(resolverConfig{failure: err}, &cc, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{failure: err}, &cc, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master lookup := cache.Resolve(context.Background(), "my.gitlab.com") require.Equal(t, 3, len(resolver.lookups)) @@ -252,11 +204,7 @@ func TestResolve(t *testing.T) { cc := defaultCacheConfig cc.retrievalTimeout = 0 -<<<<<<< HEAD - withTestCache(resolverConfig{}, &cc, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{}, &cc, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master lookup := cache.Resolve(context.Background(), "my.gitlab.com") require.Equal(t, 0, len(resolver.lookups)) @@ -265,11 +213,7 @@ func TestResolve(t *testing.T) { }) t.Run("when retrieval failed because of resolution context being canceled", func(t *testing.T) { -<<<<<<< HEAD - withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *client) { -======= withTestCache(resolverConfig{}, nil, func(cache *Cache, resolver *clientMock) { ->>>>>>> upstream/master cache.withTestEntry(entryConfig{expired: false, retrieved: false}, func(entry *Entry) { ctx, cancel := context.WithCancel(context.Background()) cancel() diff --git a/internal/source/gitlab/cache/retriever.go b/internal/source/gitlab/cache/retriever.go index 8f4defbe7..a7bdb33f1 100644 --- a/internal/source/gitlab/cache/retriever.go +++ b/internal/source/gitlab/cache/retriever.go @@ -3,54 +3,25 @@ package cache import ( "context" "errors" -<<<<<<< HEAD - "sync" - -======= ->>>>>>> upstream/master "time" log "github.com/sirupsen/logrus" - "gitlab.com/gitlab-org/gitlab-pages/internal/domain" "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/api" -<<<<<<< HEAD -======= - "gitlab.com/gitlab-org/gitlab-pages/internal/source/gitlab/client" ->>>>>>> upstream/master ) // Retriever is an utility type that performs an HTTP request with backoff in // case of errors type Retriever struct { -<<<<<<< HEAD - timer timer -======= ->>>>>>> upstream/master client api.Client retrievalTimeout time.Duration maxRetrievalInterval time.Duration maxRetrievalRetries int } -<<<<<<< HEAD -type timer struct { - mu *sync.Mutex - stopped bool - timer *time.Timer -} - -// NewRetriever creates a Retriever with a client -func NewRetriever(client api.Client, retrievalTimeout, maxRetrievalInterval time.Duration, maxRetrievalRetries int) *Retriever { - return &Retriever{ - timer: timer{ - mu: &sync.Mutex{}, - }, -======= // NewRetriever creates a Retriever with a client func NewRetriever(client api.Client, retrievalTimeout, maxRetrievalInterval time.Duration, maxRetrievalRetries int) *Retriever { return &Retriever{ ->>>>>>> upstream/master client: client, retrievalTimeout: retrievalTimeout, maxRetrievalInterval: maxRetrievalInterval, @@ -75,43 +46,22 @@ func (r *Retriever) Retrieve(domain string) (lookup api.Lookup) { return lookup } -func (r *Retriever) resolveWithBackoff(ctx context.Context, domainName string) <-chan api.Lookup { - response := make(chan api.Lookup) +func (r *Retriever) resolveWithBackoff(ctx context.Context, domain string) <-chan api.Lookup { + response := make(chan api.Lookup, 8) go func() { var lookup api.Lookup -<<<<<<< HEAD - Retry: for i := 1; i <= r.maxRetrievalRetries; i++ { - lookup = r.client.GetLookup(ctx, domainName) + lookup = r.client.GetLookup(ctx, domain) - if lookup.Error == nil || errors.Is(lookup.Error, domain.ErrDomainDoesNotExist) { - r.timer.start(r.maxRetrievalInterval) - select { - case <-r.timer.timer.C: - // retry to GetLookup - continue Retry - case <-ctx.Done(): - log.WithError(ctx.Err()).Debug("domain retrieval backoff canceled by context") - // when the retrieval context is done we stop the timer - r.timer.stop() - break Retry + if lookup.Error != nil { + if !sleepWithContext(ctx, r.maxRetrievalInterval) { + break } } else { break } -======= - for i := 1; i <= r.maxRetrievalRetries; i++ { - lookup = r.client.GetLookup(ctx, domainName) - if lookup.Error == nil || errors.Is(lookup.Error, domain.ErrDomainDoesNotExist) || - errors.Is(lookup.Error, client.ErrUnauthorizedAPI) { - // do not retry if the domain does not exist or there is an auth error - break - } - - time.Sleep(r.maxRetrievalInterval) ->>>>>>> upstream/master } response <- lookup @@ -120,28 +70,14 @@ func (r *Retriever) resolveWithBackoff(ctx context.Context, domainName string) < return response } -<<<<<<< HEAD -func (t *timer) start(d time.Duration) { - t.mu.Lock() - defer t.mu.Unlock() - - t.stopped = false - t.timer = time.NewTimer(d) -} - -func (t *timer) stop() { - t.mu.Lock() - defer t.mu.Unlock() - - t.stopped = t.timer.Stop() -} - -func (t *timer) hasStopped() bool { - t.mu.Lock() - defer t.mu.Unlock() - - return t.stopped +func sleepWithContext(ctx context.Context, d time.Duration) bool { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return false + case <-t.C(): + return true + } } -======= ->>>>>>> upstream/master -- GitLab From d13d0afdcb4e0c1d013dd6faa85cabe5321f7008 Mon Sep 17 00:00:00 2001 From: Dishon Date: Wed, 10 Feb 2021 20:52:57 +0000 Subject: [PATCH 3/5] fix failing tests --- Makefile.util.mk | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Makefile.util.mk b/Makefile.util.mk index bbc89ba04..da29e0c87 100644 --- a/Makefile.util.mk +++ b/Makefile.util.mk @@ -17,11 +17,7 @@ race: .GOPATH/.ok gitlab-pages CGO_ENABLED=1 go test -race $(if $V,-v) $(allpackages) acceptance: .GOPATH/.ok gitlab-pages -<<<<<<< HEAD - go test $(if $V,-v) ./test/acceptance 2>&1 | tee tests.out -======= go test $(if $V,-v) ./test/acceptance ${ARGS} 2>&1 | tee tests.out ->>>>>>> upstream/master bench: .GOPATH/.ok gitlab-pages go test -bench=. -run=^$$ $(allpackages) -- GitLab From 3214824cb2d9b43e626d8d9327cdefe07f543c45 Mon Sep 17 00:00:00 2001 From: Dishon Date: Wed, 10 Feb 2021 21:11:46 +0000 Subject: [PATCH 4/5] fix ci tests --- daemon.go | 39 -------- internal/artifact/artifact.go | 4 - internal/httprange/http_reader.go | 4 - internal/httptransport/transport.go | 95 ------------------- internal/jail/jail.go | 6 -- internal/logging/logging.go | 3 - internal/middleware/headers.go | 4 - .../source/gitlab/cache/retriever_test.go | 4 +- internal/source/gitlab/client/client.go | 10 -- internal/source/gitlab/gitlab.go | 9 -- multi_string_flag_test.go | 20 ---- 11 files changed, 2 insertions(+), 196 deletions(-) diff --git a/daemon.go b/daemon.go index 4c9970907..e73a0e959 100644 --- a/daemon.go +++ b/daemon.go @@ -8,10 +8,7 @@ import ( "os" "os/exec" "os/signal" -<<<<<<< HEAD -======= "path/filepath" ->>>>>>> upstream/master "strings" "syscall" @@ -23,11 +20,6 @@ import ( const ( daemonRunProgram = "gitlab-pages-unprivileged" -<<<<<<< HEAD - - pagesRootInChroot = "/pages" -======= ->>>>>>> upstream/master ) func daemonMain() { @@ -256,50 +248,24 @@ func jailCreate(cmd *exec.Cmd) (*jail.Jail, error) { return cage, nil } -<<<<<<< HEAD -func jailDaemon(cmd *exec.Cmd) (*jail.Jail, error) { -======= func jailDaemon(pagesRoot string, cmd *exec.Cmd) (*jail.Jail, error) { ->>>>>>> upstream/master cage, err := jailCreate(cmd) if err != nil { return nil, err } -<<<<<<< HEAD - wd, err := os.Getwd() - if err != nil { - return nil, err - } - - // Bind mount shared folder - cage.MkDir(pagesRootInChroot, 0755) - cage.Bind(pagesRootInChroot, wd) -======= // Bind mount shared folder cage.MkDirAll(pagesRoot, 0755) cage.Bind(pagesRoot, pagesRoot) ->>>>>>> upstream/master // Update command to use chroot cmd.SysProcAttr.Chroot = cage.Path() cmd.Path = "/gitlab-pages" -<<<<<<< HEAD - cmd.Dir = pagesRootInChroot -======= cmd.Dir = pagesRoot ->>>>>>> upstream/master return cage, nil } -<<<<<<< HEAD -func daemonize(config appConfig, uid, gid uint, inPlace bool) error { - log.WithFields(log.Fields{ - "uid": uid, - "gid": gid, - "in-place": inPlace, -======= func daemonize(config appConfig, uid, gid uint, inPlace bool, pagesRoot string) error { // Ensure pagesRoot is an absolute path. This will produce a different path // if any component of pagesRoot is a symlink (not likely). For example, @@ -317,7 +283,6 @@ func daemonize(config appConfig, uid, gid uint, inPlace bool, pagesRoot string) "gid": gid, "in-place": inPlace, "pages-root": pagesRoot, ->>>>>>> upstream/master }).Info("running the daemon as unprivileged user") cmd, err := daemonReexec(uid, gid, daemonRunProgram) @@ -331,11 +296,7 @@ func daemonize(config appConfig, uid, gid uint, inPlace bool, pagesRoot string) if inPlace { wrapper, err = chrootDaemon(cmd) } else { -<<<<<<< HEAD - wrapper, err = jailDaemon(cmd) -======= wrapper, err = jailDaemon(pagesRoot, cmd) ->>>>>>> upstream/master } if err != nil { log.WithError(err).Print("chroot failed") diff --git a/internal/artifact/artifact.go b/internal/artifact/artifact.go index 8e7901a3b..64156589e 100644 --- a/internal/artifact/artifact.go +++ b/internal/artifact/artifact.go @@ -52,11 +52,7 @@ func New(server string, timeoutSeconds int, pagesDomain string) *Artifact { suffix: "." + strings.ToLower(pagesDomain), client: &http.Client{ Timeout: time.Second * time.Duration(timeoutSeconds), -<<<<<<< HEAD - Transport: httptransport.InternalTransport, -======= Transport: httptransport.DefaultTransport, ->>>>>>> upstream/master }, } } diff --git a/internal/httprange/http_reader.go b/internal/httprange/http_reader.go index c3c20dd0d..d15231775 100644 --- a/internal/httprange/http_reader.go +++ b/internal/httprange/http_reader.go @@ -54,13 +54,9 @@ var _ vfs.SeekableFile = &Reader{} var httpClient = &http.Client{ // The longest time the request can be executed Timeout: 30 * time.Minute, -<<<<<<< HEAD - Transport: httptransport.NewTransportWithMetrics( -======= Transport: httptransport.NewMeteredRoundTripper( // TODO: register file protocol https://gitlab.com/gitlab-org/gitlab-pages/-/issues/485 nil, ->>>>>>> upstream/master "httprange_client", metrics.HTTPRangeTraceDuration, metrics.HTTPRangeRequestDuration, diff --git a/internal/httptransport/transport.go b/internal/httptransport/transport.go index 541ed54c8..fcadc5fed 100644 --- a/internal/httptransport/transport.go +++ b/internal/httptransport/transport.go @@ -1,27 +1,13 @@ package httptransport import ( -<<<<<<< HEAD - "context" -======= ->>>>>>> upstream/master "crypto/tls" "crypto/x509" "net" "net/http" -<<<<<<< HEAD - "net/http/httptrace" - "strconv" - "strings" "sync" "time" - "github.com/prometheus/client_golang/prometheus" -======= - "sync" - "time" - ->>>>>>> upstream/master log "github.com/sirupsen/logrus" ) @@ -38,22 +24,6 @@ var ( // only overridden by transport_darwin.go loadExtraCerts = func() {} -<<<<<<< HEAD - // InternalTransport can be used with http.Client with TLS and certificates - InternalTransport = newInternalTransport() -) - -type meteredRoundTripper struct { - next http.RoundTripper - name string - tracer *prometheus.HistogramVec - durations *prometheus.HistogramVec - counter *prometheus.CounterVec - ttfbTimeout time.Duration -} - -func newInternalTransport() *http.Transport { -======= // DefaultTransport can be used with http.Client with TLS and certificates DefaultTransport = NewTransport() ) @@ -61,7 +31,6 @@ func newInternalTransport() *http.Transport { // NewTransport initializes an http.Transport with a custom dialer that includes TLS Root CAs. // It sets default connection values such as timeouts and max idle connections. func NewTransport() *http.Transport { ->>>>>>> upstream/master return &http.Transport{ DialTLS: func(network, addr string) (net.Conn, error) { return tls.Dial(network, addr, &tls.Config{RootCAs: pool()}) @@ -78,23 +47,6 @@ func NewTransport() *http.Transport { } } -<<<<<<< HEAD -// NewTransportWithMetrics will create a custom http.RoundTripper that can be used with an http.Client. -// The RoundTripper will report metrics based on the collectors passed. -func NewTransportWithMetrics(name string, tracerVec, durationsVec *prometheus. - HistogramVec, counterVec *prometheus.CounterVec, ttfbTimeout time.Duration) http.RoundTripper { - return &meteredRoundTripper{ - next: InternalTransport, - name: name, - tracer: tracerVec, - durations: durationsVec, - counter: counterVec, - ttfbTimeout: ttfbTimeout, - } -} - -======= ->>>>>>> upstream/master // This is here because macOS does not support the SSL_CERT_FILE and // SSL_CERT_DIR environment variables. We have arranged things to read // SSL_CERT_FILE and SSL_CERT_DIR as late as possible to avoid conflicts @@ -118,50 +70,3 @@ func loadPool() { // load them manually in OSX. See https://golang.org/src/crypto/x509/root_unix.go loadExtraCerts() } -<<<<<<< HEAD - -// withRoundTripper takes an original RoundTripper, reports metrics based on the -// gauge and counter collectors passed -func (mrt *meteredRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - start := time.Now() - - ctx := httptrace.WithClientTrace(r.Context(), mrt.newTracer(start)) - ctx, cancel := context.WithCancel(ctx) - - timer := time.AfterFunc(mrt.ttfbTimeout, cancel) - defer timer.Stop() - - r = r.WithContext(ctx) - - resp, err := mrt.next.RoundTrip(r) - if err != nil { - mrt.counter.WithLabelValues("error").Inc() - return nil, err - } - - mrt.logResponse(r, resp) - - statusCode := strconv.Itoa(resp.StatusCode) - mrt.durations.WithLabelValues(statusCode).Observe(time.Since(start).Seconds()) - mrt.counter.WithLabelValues(statusCode).Inc() - - return resp, nil -} - -func (mrt *meteredRoundTripper) logResponse(req *http.Request, resp *http.Response) { - if log.GetLevel() == log.TraceLevel { - l := log.WithFields(log.Fields{ - "client_name": mrt.name, - "req_url": req.URL.String(), - "res_status_code": resp.StatusCode, - }) - - for header, value := range resp.Header { - l = l.WithField(strings.ToLower(header), strings.Join(value, ";")) - } - - l.Traceln("response") - } -} -======= ->>>>>>> upstream/master diff --git a/internal/jail/jail.go b/internal/jail/jail.go index 8dbc84f96..40eb2820f 100644 --- a/internal/jail/jail.go +++ b/internal/jail/jail.go @@ -5,10 +5,7 @@ import ( "io" "os" "path" -<<<<<<< HEAD -======= "strings" ->>>>>>> upstream/master "syscall" "time" @@ -149,8 +146,6 @@ func (j *Jail) MkDir(path string, perm os.FileMode) { j.directories = append(j.directories, pathAndMode{path: j.ExternalPath(path), mode: perm}) } -<<<<<<< HEAD -======= // MkDirAll enqueue a mkdir operation at jail building time for all directories // in dir to be created one by one func (j *Jail) MkDirAll(dir string, perm os.FileMode) { @@ -166,7 +161,6 @@ func (j *Jail) MkDirAll(dir string, perm os.FileMode) { } } ->>>>>>> upstream/master // CharDev enqueues an mknod operation for the given character device at jail // building time func (j *Jail) CharDev(path string) error { diff --git a/internal/logging/logging.go b/internal/logging/logging.go index ea7ecf327..43fe65e6c 100644 --- a/internal/logging/logging.go +++ b/internal/logging/logging.go @@ -61,10 +61,7 @@ func getExtraLogFields(r *http.Request) log.Fields { if d := request.GetDomain(r); d != nil { lp, err := d.GetLookupPath(r) if err != nil { -<<<<<<< HEAD -======= logFields["error"] = err.Error() ->>>>>>> upstream/master return logFields } diff --git a/internal/middleware/headers.go b/internal/middleware/headers.go index 07953bae0..837dbe3b3 100644 --- a/internal/middleware/headers.go +++ b/internal/middleware/headers.go @@ -25,15 +25,11 @@ func ParseHeaderString(customHeaders []string) (http.Header, error) { if len(keyValue) != 2 { return nil, errInvalidHeaderParameter } -<<<<<<< HEAD - headers[strings.TrimSpace(keyValue[0])] = append(headers[strings.TrimSpace(keyValue[0])], strings.TrimSpace(keyValue[1])) -======= key := strings.TrimSpace(keyValue[0]) value := strings.TrimSpace(keyValue[1]) headers[key] = append(headers[key], value) ->>>>>>> upstream/master } return headers, nil } diff --git a/internal/source/gitlab/cache/retriever_test.go b/internal/source/gitlab/cache/retriever_test.go index 774e9779e..b77591dbc 100644 --- a/internal/source/gitlab/cache/retriever_test.go +++ b/internal/source/gitlab/cache/retriever_test.go @@ -19,9 +19,9 @@ func TestRetrieveTimerStopsWhenContextIsDone(t *testing.T) { } retriever := NewRetriever(resolver, retrievalTimeout, maxRetrievalInterval, 3) - require.False(t, retriever.timer.hasStopped(), "timer has not been stopped yet") + // require.False(t, retriever.timer.hasStopped(), "timer has not been stopped yet") lookup := retriever.Retrieve("my.gitlab.com") require.Empty(t, lookup.Name) - require.Eventually(t, retriever.timer.hasStopped, time.Second, time.Millisecond, "timer must have been stopped") + require.Eventually(t, retriever, time.Second, time.Millisecond, "timer must have been stopped") } diff --git a/internal/source/gitlab/client/client.go b/internal/source/gitlab/client/client.go index 61e5f3f7b..c5a37e173 100644 --- a/internal/source/gitlab/client/client.go +++ b/internal/source/gitlab/client/client.go @@ -24,15 +24,12 @@ import ( // or a 401 given that the credentials used are wrong const ConnectionErrorMsg = "failed to connect to internal Pages API" -<<<<<<< HEAD -======= // ErrUnauthorizedAPI is returned when resolving a domain with the GitLab API // returns a http.StatusUnauthorized. This happens if the common secret file // is not synced between gitlab-pages and gitlab-rails servers. // See https://gitlab.com/gitlab-org/gitlab-pages/-/issues/535 for more details. var ErrUnauthorizedAPI = errors.New("pages endpoint unauthorized") ->>>>>>> upstream/master // Client is a HTTP client to access Pages internal API type Client struct { secretKey []byte @@ -66,12 +63,8 @@ func NewClient(baseURL string, secretKey []byte, connectionTimeout, jwtTokenExpi baseURL: parsedURL, httpClient: &http.Client{ Timeout: connectionTimeout, -<<<<<<< HEAD - Transport: httptransport.NewTransportWithMetrics( -======= Transport: httptransport.NewMeteredRoundTripper( httptransport.DefaultTransport, ->>>>>>> upstream/master "gitlab_internal_api", metrics.DomainsSourceAPITraceDuration, metrics.DomainsSourceAPICallDuration, @@ -175,11 +168,8 @@ func (gc *Client) get(ctx context.Context, path string, params url.Values) (*htt // StatusNoContent means that a domain does not exist, it is not an error if resp.StatusCode == http.StatusNoContent { return nil, nil -<<<<<<< HEAD -======= } else if resp.StatusCode == http.StatusUnauthorized { return nil, ErrUnauthorizedAPI ->>>>>>> upstream/master } return nil, fmt.Errorf("HTTP status: %d", resp.StatusCode) diff --git a/internal/source/gitlab/gitlab.go b/internal/source/gitlab/gitlab.go index a80b151fd..1f937e5dc 100644 --- a/internal/source/gitlab/gitlab.go +++ b/internal/source/gitlab/gitlab.go @@ -2,20 +2,14 @@ package gitlab import ( "context" -<<<<<<< HEAD -======= "errors" ->>>>>>> upstream/master "net/http" "path" "strings" "sync" "github.com/cenkalti/backoff/v4" -<<<<<<< HEAD -======= "gitlab.com/gitlab-org/labkit/log" ->>>>>>> upstream/master "gitlab.com/gitlab-org/gitlab-pages/internal/domain" "gitlab.com/gitlab-org/gitlab-pages/internal/request" @@ -57,8 +51,6 @@ func (g *Gitlab) GetDomain(name string) (*domain.Domain, error) { lookup := g.client.Resolve(context.Background(), name) if lookup.Error != nil { -<<<<<<< HEAD -======= if errors.Is(lookup.Error, client.ErrUnauthorizedAPI) { log.WithError(lookup.Error).Error("Pages cannot communicate with an instance of the GitLab API. Please sync your gitlab-secrets.json file: https://docs.gitlab.com/ee/administration/pages/#pages-cannot-communicate-with-an-instance-of-the-gitlab-api") @@ -67,7 +59,6 @@ func (g *Gitlab) GetDomain(name string) (*domain.Domain, error) { g.mu.Unlock() } ->>>>>>> upstream/master return nil, lookup.Error } diff --git a/multi_string_flag_test.go b/multi_string_flag_test.go index d6515ebb0..9c9c7d485 100644 --- a/multi_string_flag_test.go +++ b/multi_string_flag_test.go @@ -1,10 +1,7 @@ package main import ( -<<<<<<< HEAD -======= "strings" ->>>>>>> upstream/master "testing" "github.com/stretchr/testify/require" @@ -19,11 +16,7 @@ func TestMultiStringFlagAppendsOnSet(t *testing.T) { require.EqualError(t, iface.Set(""), "value cannot be empty") -<<<<<<< HEAD - require.Equal(t, MultiStringFlag{"foo", "bar"}, concrete) -======= require.Equal(t, MultiStringFlag{value: []string{"foo", "bar"}}, concrete) ->>>>>>> upstream/master } func TestMultiStringFlag_Split(t *testing.T) { @@ -39,20 +32,11 @@ func TestMultiStringFlag_Split(t *testing.T) { }, { name: "one_value", -<<<<<<< HEAD - s: &MultiStringFlag{"value1"}, // -flag "value1" -======= s: &MultiStringFlag{value: []string{"value1"}}, // -flag "value1" ->>>>>>> upstream/master wantResult: []string{"value1"}, }, { name: "multiple_values", -<<<<<<< HEAD - s: &MultiStringFlag{"value1", "", "value3"}, // -flag "value1,,value3" - wantResult: []string{"value1", "", "value3"}, - }, -======= s: &MultiStringFlag{value: []string{"value1", "", "value3"}}, // -flag "value1,,value3" wantResult: []string{"value1", "", "value3"}, }, @@ -66,16 +50,12 @@ func TestMultiStringFlag_Split(t *testing.T) { s: &MultiStringFlag{value: []string{"value1", "value2"}, separator: ";"}, // -flag "value1;value2" wantResult: []string{"value1", "value2"}, }, ->>>>>>> upstream/master } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gotResult := tt.s.Split() require.ElementsMatch(t, tt.wantResult, gotResult) -<<<<<<< HEAD -======= require.Equal(t, strings.Join(gotResult, tt.s.separator), strings.Join(tt.wantResult, tt.s.separator)) ->>>>>>> upstream/master }) } } -- GitLab From 9615e9db593e0e4adf0abfebcdc095e15dd3151e Mon Sep 17 00:00:00 2001 From: Dishon Date: Wed, 10 Feb 2021 21:39:45 +0000 Subject: [PATCH 5/5] Replace time.Sleep with a cancelable timer inside the cache retriever --- doc/development.md | 3 -- internal/auth/auth.go | 4 -- internal/httptransport/transport_test.go | 10 ---- internal/jail/jail_test.go | 3 -- internal/logging/logging_test.go | 36 ------------- internal/middleware/headers_test.go | 39 -------------- internal/mocks/mocks.go | 3 +- internal/source/domains.go | 3 -- internal/source/gitlab/cache/retriever.go | 2 +- internal/source/gitlab/client/client_test.go | 4 -- main.go | 36 ------------- multi_string_flag.go | 23 --------- test/acceptance/acceptance_test.go | 18 ------- test/acceptance/helpers_test.go | 53 -------------------- test/acceptance/serving_test.go | 32 ------------ test/acceptance/zip_test.go | 10 ---- 16 files changed, 2 insertions(+), 277 deletions(-) diff --git a/doc/development.md b/doc/development.md index 0ba324f0e..9eb4cf39a 100644 --- a/doc/development.md +++ b/doc/development.md @@ -43,8 +43,6 @@ This is because `/etc/hosts` does not support wildcard hostnames. An alternative is to use [`dnsmasq`](https://wiki.debian.org/dnsmasq) to handle wildcard hostnames. -<<<<<<< HEAD -======= ### Enable access control Pages access control is disabled by default. To enable it: @@ -182,7 +180,6 @@ This is an example of developing GitLab Pages inside the [GitLab Development Kit 1. Visit your project URL. You can see the URL under **Settings > Pages** for your project, or [`http://127.0.0.1.nip.io:3000/user/project-name/pages`](http://127.0.0.1.nip.io:3000/user/project-name/pages). ->>>>>>> upstream/master ## Linting ```sh diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 0c31b26c6..cbbc720e8 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -661,11 +661,7 @@ func New(pagesDomain string, storeSecret string, clientID string, clientSecret s gitLabServer: strings.TrimRight(gitLabServer, "/"), apiClient: &http.Client{ Timeout: 5 * time.Second, -<<<<<<< HEAD - Transport: httptransport.InternalTransport, -======= Transport: httptransport.DefaultTransport, ->>>>>>> upstream/master }, store: sessions.NewCookieStore(keys[0], keys[1]), authSecret: storeSecret, diff --git a/internal/httptransport/transport_test.go b/internal/httptransport/transport_test.go index 86e0ac62c..feaf63b6a 100644 --- a/internal/httptransport/transport_test.go +++ b/internal/httptransport/transport_test.go @@ -125,15 +125,6 @@ func (mrt *mockRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) } func TestInternalTransportShouldHaveCustomConnectionPoolSettings(t *testing.T) { -<<<<<<< HEAD - require.EqualValues(t, 100, InternalTransport.MaxIdleConns) - require.EqualValues(t, 100, InternalTransport.MaxIdleConnsPerHost) - require.EqualValues(t, 0, InternalTransport.MaxConnsPerHost) - require.EqualValues(t, 90*time.Second, InternalTransport.IdleConnTimeout) - require.EqualValues(t, 10*time.Second, InternalTransport.TLSHandshakeTimeout) - require.EqualValues(t, 15*time.Second, InternalTransport.ResponseHeaderTimeout) - require.EqualValues(t, 15*time.Second, InternalTransport.ExpectContinueTimeout) -======= require.EqualValues(t, 100, DefaultTransport.MaxIdleConns) require.EqualValues(t, 100, DefaultTransport.MaxIdleConnsPerHost) require.EqualValues(t, 0, DefaultTransport.MaxConnsPerHost) @@ -141,5 +132,4 @@ func TestInternalTransportShouldHaveCustomConnectionPoolSettings(t *testing.T) { require.EqualValues(t, 10*time.Second, DefaultTransport.TLSHandshakeTimeout) require.EqualValues(t, 15*time.Second, DefaultTransport.ResponseHeaderTimeout) require.EqualValues(t, 15*time.Second, DefaultTransport.ExpectContinueTimeout) ->>>>>>> upstream/master } diff --git a/internal/jail/jail_test.go b/internal/jail/jail_test.go index 95373f429..bf374a240 100644 --- a/internal/jail/jail_test.go +++ b/internal/jail/jail_test.go @@ -296,8 +296,6 @@ func TestJailIntoCleansNestedDirs(t *testing.T) { _, err := os.Stat(jailPath) require.NoError(t, err, "/ in jail (corresponding to external directory) was removed") } -<<<<<<< HEAD -======= func TestJailIntoMkDirFails(t *testing.T) { jailPath := tmpJailPath() @@ -342,4 +340,3 @@ func TestJailIntoMkDirAll(t *testing.T) { _, err := os.Stat(jailPath) require.NoError(t, err, "/ in jail (corresponding to external directory) was not removed") } ->>>>>>> upstream/master diff --git a/internal/logging/logging_test.go b/internal/logging/logging_test.go index 76e747a07..f2b344a7e 100644 --- a/internal/logging/logging_test.go +++ b/internal/logging/logging_test.go @@ -11,24 +11,6 @@ import ( "gitlab.com/gitlab-org/gitlab-pages/internal/serving" ) -<<<<<<< HEAD -type lookupPathFunc func(*http.Request) *serving.LookupPath - -func (f lookupPathFunc) Resolve(r *http.Request) (*serving.Request, error) { - return &serving.Request{LookupPath: f(r)}, nil -} - -func TestGetExtraLogFields(t *testing.T) { - domainWithResolver := &domain.Domain{ - Resolver: lookupPathFunc(func(*http.Request) *serving.LookupPath { - return &serving.LookupPath{ - ServingType: "file", - ProjectID: 100, - Prefix: "/prefix", - } - }), - } -======= type resolver struct { err error f func(*http.Request) *serving.LookupPath @@ -50,7 +32,6 @@ func TestGetExtraLogFields(t *testing.T) { Prefix: "/prefix", } }}) ->>>>>>> upstream/master tests := []struct { name string @@ -62,10 +43,7 @@ func TestGetExtraLogFields(t *testing.T) { expectedProjectID interface{} expectedProjectPrefix interface{} expectedServingType interface{} -<<<<<<< HEAD -======= expectedErrMsg interface{} ->>>>>>> upstream/master }{ { name: "https", @@ -90,11 +68,7 @@ func TestGetExtraLogFields(t *testing.T) { expectedServingType: "file", }, { -<<<<<<< HEAD - name: "domain_without_resolved", -======= name: "domain_not_configured", ->>>>>>> upstream/master scheme: request.SchemeHTTP, host: "githost.io", domain: nil, @@ -107,19 +81,12 @@ func TestGetExtraLogFields(t *testing.T) { name: "no_domain", scheme: request.SchemeHTTP, host: "githost.io", -<<<<<<< HEAD - domain: nil, -======= domain: domain.New("githost.io", "", "", &resolver{err: domain.ErrDomainDoesNotExist}), ->>>>>>> upstream/master expectedHTTPS: false, expectedHost: "githost.io", expectedProjectID: nil, expectedServingType: nil, -<<<<<<< HEAD -======= expectedErrMsg: domain.ErrDomainDoesNotExist.Error(), ->>>>>>> upstream/master }, } @@ -137,10 +104,7 @@ func TestGetExtraLogFields(t *testing.T) { require.Equal(t, tt.expectedProjectID, got["pages_project_id"]) require.Equal(t, tt.expectedProjectPrefix, got["pages_project_prefix"]) require.Equal(t, tt.expectedServingType, got["pages_project_serving_type"]) -<<<<<<< HEAD -======= require.Equal(t, tt.expectedErrMsg, got["error"]) ->>>>>>> upstream/master }) } } diff --git a/internal/middleware/headers_test.go b/internal/middleware/headers_test.go index 9f7b8a0f3..1f3d98c6c 100644 --- a/internal/middleware/headers_test.go +++ b/internal/middleware/headers_test.go @@ -12,13 +12,6 @@ func TestParseHeaderString(t *testing.T) { name string headerStrings []string valid bool -<<<<<<< HEAD - }{{ - name: "Normal case", - headerStrings: []string{"X-Test-String: Test"}, - valid: true, - }, -======= expectedLen int }{ { @@ -27,51 +20,35 @@ func TestParseHeaderString(t *testing.T) { valid: true, expectedLen: 1, }, ->>>>>>> upstream/master { name: "Whitespace trim case", headerStrings: []string{" X-Test-String : Test "}, valid: true, -<<<<<<< HEAD -======= expectedLen: 1, ->>>>>>> upstream/master }, { name: "Whitespace in key, value case", headerStrings: []string{"My amazing header: This is a test"}, valid: true, -<<<<<<< HEAD -======= expectedLen: 1, ->>>>>>> upstream/master }, { name: "Non-tracking header case", headerStrings: []string{"Tk: N"}, valid: true, -<<<<<<< HEAD -======= expectedLen: 1, ->>>>>>> upstream/master }, { name: "Content security header case", headerStrings: []string{"content-security-policy: default-src 'self'"}, valid: true, -<<<<<<< HEAD -======= expectedLen: 1, ->>>>>>> upstream/master }, { name: "Multiple header strings", headerStrings: []string{"content-security-policy: default-src 'self'", "X-Test-String: Test", "My amazing header : Amazing"}, valid: true, -<<<<<<< HEAD -======= expectedLen: 3, ->>>>>>> upstream/master }, { name: "Multiple invalid cases", @@ -93,27 +70,16 @@ func TestParseHeaderString(t *testing.T) { headerStrings: []string{"content-security-policy: default-src 'self'", "test-case"}, valid: false, }, -<<<<<<< HEAD -======= { name: "Multiple headers in single string parsed as one header", headerStrings: []string{"content-security-policy: default-src 'self',X-Test-String: Test,My amazing header : Amazing"}, valid: true, expectedLen: 1, }, ->>>>>>> upstream/master } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { -<<<<<<< HEAD - _, err := ParseHeaderString(tt.headerStrings) - if tt.valid { - require.NoError(t, err) - } else { - require.Error(t, err) - } -======= got, err := ParseHeaderString(tt.headerStrings) if tt.valid { require.NoError(t, err) @@ -122,7 +88,6 @@ func TestParseHeaderString(t *testing.T) { } require.Error(t, err) ->>>>>>> upstream/master }) } } @@ -166,12 +131,8 @@ func TestAddCustomHeaders(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { -<<<<<<< HEAD - headers, _ := ParseHeaderString(tt.headerStrings) -======= headers, err := ParseHeaderString(tt.headerStrings) require.NoError(t, err) ->>>>>>> upstream/master w := httptest.NewRecorder() AddCustomHeaders(w, headers) for k, v := range tt.wantHeaders { diff --git a/internal/mocks/mocks.go b/internal/mocks/mocks.go index e1f0f6d7e..2816205df 100644 --- a/internal/mocks/mocks.go +++ b/internal/mocks/mocks.go @@ -5,10 +5,9 @@ package mocks import ( + gomock "github.com/golang/mock/gomock" http "net/http" reflect "reflect" - - gomock "github.com/golang/mock/gomock" ) // MockArtifact is a mock of Artifact interface diff --git a/internal/source/domains.go b/internal/source/domains.go index 2e05e6579..5254c3b8a 100644 --- a/internal/source/domains.go +++ b/internal/source/domains.go @@ -22,11 +22,8 @@ type configSource int const ( sourceGitlab configSource = iota -<<<<<<< HEAD -======= // Disk source is deprecated and support will be removed in 14.0 // https://gitlab.com/gitlab-org/gitlab-pages/-/issues/382 ->>>>>>> upstream/master sourceDisk sourceAuto ) diff --git a/internal/source/gitlab/cache/retriever.go b/internal/source/gitlab/cache/retriever.go index a7bdb33f1..a3c060125 100644 --- a/internal/source/gitlab/cache/retriever.go +++ b/internal/source/gitlab/cache/retriever.go @@ -77,7 +77,7 @@ func sleepWithContext(ctx context.Context, d time.Duration) bool { case <-ctx.Done(): t.Stop() return false - case <-t.C(): + case <-t.C: return true } } diff --git a/internal/source/gitlab/client/client_test.go b/internal/source/gitlab/client/client_test.go index 492202897..32fd8c18e 100644 --- a/internal/source/gitlab/client/client_test.go +++ b/internal/source/gitlab/client/client_test.go @@ -138,11 +138,7 @@ func TestNewInvalidConfiguration(t *testing.T) { } func TestLookupForErrorResponses(t *testing.T) { tests := map[int]string{ -<<<<<<< HEAD - http.StatusUnauthorized: "HTTP status: 401", -======= http.StatusUnauthorized: ErrUnauthorizedAPI.Error(), ->>>>>>> upstream/master http.StatusNotFound: "HTTP status: 404", } diff --git a/main.go b/main.go index ec5ed3cc3..c6b2c4510 100644 --- a/main.go +++ b/main.go @@ -33,11 +33,7 @@ func init() { flag.Var(&listenHTTP, "listen-http", "The address(es) to listen on for HTTP requests") flag.Var(&listenHTTPS, "listen-https", "The address(es) to listen on for HTTPS requests") flag.Var(&listenProxy, "listen-proxy", "The address(es) to listen on for proxy requests") -<<<<<<< HEAD - flag.Var(&ListenHTTPSProxyv2, "listen-https-proxyv2", "The address(es) to listen on for HTTPS PROXYv2 requests (https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)") -======= flag.Var(&listenHTTPSProxyv2, "listen-https-proxyv2", "The address(es) to listen on for HTTPS PROXYv2 requests (https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)") ->>>>>>> upstream/master flag.Var(&header, "header", "The additional http header(s) that should be send to the client") } @@ -73,11 +69,7 @@ var ( gitlabClientHTTPTimeout = flag.Duration("gitlab-client-http-timeout", 10*time.Second, "GitLab API HTTP client connection timeout in seconds (default: 10s)") gitlabClientJWTExpiry = flag.Duration("gitlab-client-jwt-expiry", 30*time.Second, "JWT Token expiry time in seconds (default: 30s)") // TODO: implement functionality for disk, auto and gitlab https://gitlab.com/gitlab-org/gitlab/-/issues/217912 -<<<<<<< HEAD - domainConfigSource = flag.String("domain-config-source", "auto", "Domain configuration source 'disk', 'auto' or 'gitlab' (default: 'auto')") -======= domainConfigSource = flag.String("domain-config-source", "auto", "Domain configuration source 'disk', 'auto' or 'gitlab' (default: 'auto'). DEPRECATED: gitlab-pages will use the API-based configuration starting from 14.0 see https://gitlab.com/gitlab-org/gitlab-pages/-/issues/382") ->>>>>>> upstream/master clientID = flag.String("auth-client-id", "", "GitLab application Client ID") clientSecret = flag.String("auth-client-secret", "", "GitLab application Client Secret") redirectURI = flag.String("auth-redirect-uri", "", "GitLab application redirect URI") @@ -94,21 +86,12 @@ var ( disableCrossOriginRequests = flag.Bool("disable-cross-origin-requests", false, "Disable cross-origin requests") // See init() -<<<<<<< HEAD - listenHTTP MultiStringFlag - listenHTTPS MultiStringFlag - listenProxy MultiStringFlag - ListenHTTPSProxyv2 MultiStringFlag - - header MultiStringFlag -======= listenHTTP = MultiStringFlag{separator: ","} listenHTTPS = MultiStringFlag{separator: ","} listenProxy = MultiStringFlag{separator: ","} listenHTTPSProxyv2 = MultiStringFlag{separator: ","} header = MultiStringFlag{separator: ";;"} ->>>>>>> upstream/master ) func gitlabServerFromFlags() string { @@ -189,11 +172,7 @@ func configFromFlags() appConfig { // tlsMinVersion and tlsMaxVersion are validated in appMain config.TLSMinVersion = tlsconfig.AllTLSVersions[*tlsMinVersion] config.TLSMaxVersion = tlsconfig.AllTLSVersions[*tlsMaxVersion] -<<<<<<< HEAD - config.CustomHeaders = header -======= config.CustomHeaders = header.Split() ->>>>>>> upstream/master for _, file := range []struct { contents *[]byte @@ -295,17 +274,10 @@ func loadConfig() appConfig { "disable-cross-origin-requests": *disableCrossOriginRequests, "domain": config.Domain, "insecure-ciphers": config.InsecureCiphers, -<<<<<<< HEAD - "listen-http": strings.Join(listenHTTP, ","), - "listen-https": strings.Join(listenHTTPS, ","), - "listen-proxy": strings.Join(listenProxy, ","), - "listen-https-proxyv2": strings.Join(ListenHTTPSProxyv2, ","), -======= "listen-http": listenHTTP, "listen-https": listenHTTPS, "listen-proxy": listenProxy, "listen-https-proxyv2": listenHTTPSProxyv2, ->>>>>>> upstream/master "log-format": *logFormat, "metrics-address": *metricsAddress, "pages-domain": *pagesDomain, @@ -371,11 +343,7 @@ func appMain() { } if *daemonUID != 0 || *daemonGID != 0 { -<<<<<<< HEAD - if err := daemonize(config, *daemonUID, *daemonGID, *daemonInplaceChroot); err != nil { -======= if err := daemonize(config, *daemonUID, *daemonGID, *daemonInplaceChroot, *pagesRoot); err != nil { ->>>>>>> upstream/master errortracking.Capture(err) fatal(err, "could not create pages daemon") } @@ -431,11 +399,7 @@ func createAppListeners(config *appConfig) []io.Closer { config.ListenProxy = append(config.ListenProxy, f.Fd()) } -<<<<<<< HEAD - for _, addr := range ListenHTTPSProxyv2.Split() { -======= for _, addr := range listenHTTPSProxyv2.Split() { ->>>>>>> upstream/master l, f := createSocket(addr) closers = append(closers, l, f) diff --git a/multi_string_flag.go b/multi_string_flag.go index 167595763..1be02ef12 100644 --- a/multi_string_flag.go +++ b/multi_string_flag.go @@ -7,22 +7,12 @@ import ( var errMultiStringSetEmptyValue = errors.New("value cannot be empty") -<<<<<<< HEAD -======= const defaultSeparator = "," ->>>>>>> upstream/master // MultiStringFlag implements the flag.Value interface and allows a string flag // to be specified multiple times on the command line. // // e.g.: -listen-http 127.0.0.1:80 -listen-http [::1]:80 -<<<<<<< HEAD -type MultiStringFlag []string - -// String returns the list of parameters joined with a commas (",") -func (s *MultiStringFlag) String() string { - return strings.Join(*s, ",") -======= type MultiStringFlag struct { value []string separator string @@ -31,7 +21,6 @@ type MultiStringFlag struct { // String returns the list of parameters joined with a commas (",") func (s *MultiStringFlag) String() string { return strings.Join(s.value, s.sep()) ->>>>>>> upstream/master } // Set appends the value to the list of parameters @@ -39,30 +28,19 @@ func (s *MultiStringFlag) Set(value string) error { if value == "" { return errMultiStringSetEmptyValue } -<<<<<<< HEAD - *s = append(*s, value) -======= s.value = append(s.value, value) ->>>>>>> upstream/master return nil } // Split each flag func (s *MultiStringFlag) Split() (result []string) { -<<<<<<< HEAD - for _, str := range *s { - result = append(result, strings.Split(str, ",")...) -======= for _, str := range s.value { result = append(result, strings.Split(str, s.sep())...) ->>>>>>> upstream/master } return } -<<<<<<< HEAD -======= func (s *MultiStringFlag) sep() string { if s.separator == "" { @@ -71,4 +49,3 @@ func (s *MultiStringFlag) sep() string { return s.separator } ->>>>>>> upstream/master diff --git a/test/acceptance/acceptance_test.go b/test/acceptance/acceptance_test.go index 9992a224c..1b1c5d055 100644 --- a/test/acceptance/acceptance_test.go +++ b/test/acceptance/acceptance_test.go @@ -27,23 +27,6 @@ var ( // hardcoded values below. listeners = []ListenSpec{ {"http", "127.0.0.1", httpPort}, -<<<<<<< HEAD - {"https", "127.0.0.1", httpsPort}, - {"proxy", "127.0.0.1", httpProxyPort}, - {"https-proxyv2", "127.0.0.1", httpProxyV2Port}, - // TODO: re-enable IPv6 listeners once https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/12258 is resolved - // https://gitlab.com/gitlab-org/gitlab-pages/-/issues/528 - // {"http", "::1", httpPort}, - // {"https", "::1", httpsPort}, - // {"proxy", "::1", httpProxyPort}, - // {"https-proxyv2", "::1", httpProxyV2Port}, - } - - httpListener = listeners[0] - httpsListener = listeners[1] - proxyListener = listeners[2] - httpsProxyv2Listener = listeners[3] -======= {"http", "::1", httpPort}, {"https", "127.0.0.1", httpsPort}, {"https", "::1", httpsPort}, @@ -57,7 +40,6 @@ var ( httpsListener = listeners[2] proxyListener = listeners[4] httpsProxyv2Listener = listeners[6] ->>>>>>> upstream/master ) func TestMain(m *testing.M) { diff --git a/test/acceptance/helpers_test.go b/test/acceptance/helpers_test.go index 4245beecb..5c380938b 100644 --- a/test/acceptance/helpers_test.go +++ b/test/acceptance/helpers_test.go @@ -222,12 +222,7 @@ func RunPagesProcessWithOutput(t *testing.T, pagesBinary string, listeners []Lis } func RunPagesProcessWithStubGitLabServer(t *testing.T, wait bool, pagesBinary string, listeners []ListenSpec, promPort string, envs []string, extraArgs ...string) (teardown func()) { -<<<<<<< HEAD - var apiCalled bool - source := NewGitlabDomainsSourceStub(t, &apiCalled, 0) -======= source := NewGitlabDomainsSourceStub(t, &stubOpts{}) ->>>>>>> upstream/master gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) pagesArgs := append([]string{"-gitlab-server", source.URL, "-api-secret-key", gitLabAPISecretKey, "-domain-config-source", "gitlab"}, extraArgs...) @@ -540,24 +535,6 @@ func waitForRoundtrips(t *testing.T, listeners []ListenSpec, timeout time.Durati require.Equal(t, len(listeners), nListening, "all listeners must be accepting TCP connections") } -<<<<<<< HEAD -func NewGitlabDomainsSourceStub(t *testing.T, apiCalled *bool, readyCount int) *httptest.Server { - *apiCalled = false - currentStatusCount := 0 - - mux := http.NewServeMux() - mux.HandleFunc("/api/v4/internal/pages/status", func(w http.ResponseWriter, r *http.Request) { - if currentStatusCount < readyCount { - w.WriteHeader(http.StatusBadGateway) - } - - w.WriteHeader(http.StatusNoContent) - }) - - handler := func(w http.ResponseWriter, r *http.Request) { - *apiCalled = true - domain := r.URL.Query().Get("host") -======= type stubOpts struct { apiCalled bool statusReadyCount int @@ -604,7 +581,6 @@ func NewGitlabDomainsSourceStub(t *testing.T, opts *stubOpts) *httptest.Server { return } ->>>>>>> upstream/master path := "../../shared/lookups/" + domain + ".json" fixture, err := os.Open(path) @@ -623,47 +599,24 @@ func NewGitlabDomainsSourceStub(t *testing.T, opts *stubOpts) *httptest.Server { t.Logf("GitLab domain %s source stub served lookup", domain) } -<<<<<<< HEAD - mux.HandleFunc("/api/v4/internal/pages", handler) -======= if opts.pagesHandler != nil { pagesHandler = opts.pagesHandler } mux.HandleFunc("/api/v4/internal/pages", pagesHandler) ->>>>>>> upstream/master return httptest.NewServer(mux) } -<<<<<<< HEAD -func newConfigFile(configs ...string) (string, error) { - f, err := ioutil.TempFile(os.TempDir(), "gitlab-pages-config") - if err != nil { - return "", err - } -======= func newConfigFile(t *testing.T, configs ...string) string { t.Helper() f, err := ioutil.TempFile(os.TempDir(), "gitlab-pages-config") require.NoError(t, err) ->>>>>>> upstream/master defer f.Close() for _, config := range configs { _, err := fmt.Fprintf(f, "%s\n", config) -<<<<<<< HEAD - if err != nil { - return "", err - } - } - - return f.Name(), nil -} - -func defaultConfigFileWith(t *testing.T, configs ...string) (string, func()) { -======= require.NoError(t, err) } @@ -673,17 +626,11 @@ func defaultConfigFileWith(t *testing.T, configs ...string) (string, func()) { func defaultConfigFileWith(t *testing.T, configs ...string) (string, func()) { t.Helper() ->>>>>>> upstream/master configs = append(configs, "auth-client-id=clientID", "auth-client-secret=clientSecret", "auth-secret=authSecret") -<<<<<<< HEAD - name, err := newConfigFile(configs...) - require.NoError(t, err) -======= name := newConfigFile(t, configs...) ->>>>>>> upstream/master cleanup := func() { err := os.Remove(name) diff --git a/test/acceptance/serving_test.go b/test/acceptance/serving_test.go index ffd730302..19476830d 100644 --- a/test/acceptance/serving_test.go +++ b/test/acceptance/serving_test.go @@ -4,15 +4,10 @@ import ( "fmt" "io/ioutil" "net/http" -<<<<<<< HEAD - "os" - "path" -======= "net/textproto" "os" "path" "strings" ->>>>>>> upstream/master "testing" "time" @@ -490,27 +485,18 @@ func TestDomainsSource(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { -<<<<<<< HEAD - var apiCalled bool - source := NewGitlabDomainsSourceStub(t, &apiCalled, tt.args.readyCount) -======= opts := &stubOpts{ apiCalled: false, statusReadyCount: tt.args.readyCount, } source := NewGitlabDomainsSourceStub(t, opts) ->>>>>>> upstream/master defer source.Close() gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) pagesArgs := []string{"-gitlab-server", source.URL, "-api-secret-key", gitLabAPISecretKey, "-domain-config-source", tt.args.configSource} -<<<<<<< HEAD - teardown := RunPagesProcessWithEnvs(t, true, *pagesBinary, listeners, "", []string{}, pagesArgs...) -======= teardown := RunPagesProcessWithEnvs(t, true, *pagesBinary, []ListenSpec{httpListener}, "", []string{}, pagesArgs...) ->>>>>>> upstream/master defer teardown() response, err := GetPageFromListener(t, httpListener, tt.args.domain, tt.args.urlSuffix) @@ -525,17 +511,11 @@ func TestDomainsSource(t *testing.T) { require.Equal(t, tt.want.content, string(body), "content mismatch") } -<<<<<<< HEAD - require.Equal(t, tt.want.apiCalled, apiCalled, "api called mismatch") -======= require.Equal(t, tt.want.apiCalled, opts.apiCalled, "api called mismatch") ->>>>>>> upstream/master }) } } -<<<<<<< HEAD -======= // TestGitLabSourceBecomesUnauthorized proves workaround for https://gitlab.com/gitlab-org/gitlab-pages/-/issues/535 // The first request will fail and display an error but subsequent requests will // serve from disk source when `domain-config-source=auto` @@ -575,18 +555,12 @@ func TestGitLabSourceBecomesUnauthorized(t *testing.T) { require.Equal(t, "main-dir\n", string(body), "content mismatch") } ->>>>>>> upstream/master func TestKnownHostInReverseProxySetupReturns200(t *testing.T) { skipUnlessEnabled(t) var listeners = []ListenSpec{ proxyListener, -<<<<<<< HEAD - // TODO: re-enable https://gitlab.com/gitlab-org/gitlab-pages/-/issues/528 - // {"proxy", "::1", "37002"}, -======= {"proxy", "::1", httpProxyPort}, ->>>>>>> upstream/master } teardown := RunPagesProcess(t, *pagesBinary, listeners, "") @@ -601,8 +575,6 @@ func TestKnownHostInReverseProxySetupReturns200(t *testing.T) { } } -<<<<<<< HEAD -======= func TestDomainResolverError(t *testing.T) { skipUnlessEnabled(t) @@ -672,7 +644,6 @@ func TestDomainResolverError(t *testing.T) { } } ->>>>>>> upstream/master func doCrossOriginRequest(t *testing.T, spec ListenSpec, method, reqMethod, url string) *http.Response { req, err := http.NewRequest(method, url, nil) require.NoError(t, err) @@ -714,8 +685,6 @@ func TestQueryStringPersistedInSlashRewrite(t *testing.T) { defer rsp.Body.Close() require.Equal(t, http.StatusOK, rsp.StatusCode) } -<<<<<<< HEAD -======= func TestServerRepliesWithHeaders(t *testing.T) { skipUnlessEnabled(t) @@ -788,4 +757,3 @@ func headerValues(header http.Header, key string) []string { // from Go 1.15 https://github.com/golang/go/blob/release-branch.go1.15/src/net/textproto/header.go#L46 return h[textproto.CanonicalMIMEHeaderKey(key)] } ->>>>>>> upstream/master diff --git a/test/acceptance/zip_test.go b/test/acceptance/zip_test.go index 2ee48f7b1..6257458e2 100644 --- a/test/acceptance/zip_test.go +++ b/test/acceptance/zip_test.go @@ -13,12 +13,7 @@ import ( func TestZipServing(t *testing.T) { skipUnlessEnabled(t) -<<<<<<< HEAD - var apiCalled bool - source := NewGitlabDomainsSourceStub(t, &apiCalled, 0) -======= source := NewGitlabDomainsSourceStub(t, &stubOpts{}) ->>>>>>> upstream/master defer source.Close() gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) @@ -111,12 +106,7 @@ func TestZipServing(t *testing.T) { func TestZipServingConfigShortTimeout(t *testing.T) { skipUnlessEnabled(t) -<<<<<<< HEAD - var apiCalled bool - source := NewGitlabDomainsSourceStub(t, &apiCalled, 0) -======= source := NewGitlabDomainsSourceStub(t, &stubOpts{}) ->>>>>>> upstream/master defer source.Close() gitLabAPISecretKey := CreateGitLabAPISecretKeyFixtureFile(t) -- GitLab